Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-25 05:48:47 +01:00
rpc : fix ggml_backend_rpc_supports_buft() (#7918)
This commit is contained in:
parent: a55eb1bf0f
commit: 172c825684
@@ -624,12 +624,12 @@ GGML_CALL static enum ggml_status ggml_backend_rpc_graph_compute(ggml_backend_t
 GGML_CALL static bool ggml_backend_rpc_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
     UNUSED(backend);
     UNUSED(op);
-    GGML_ASSERT(false && "not implemented");
-    return false;
+    //TODO: call the remote backend and cache the results
+    return true;
 }
 
 GGML_CALL static bool ggml_backend_rpc_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
-    if (buft->iface.get_name == ggml_backend_rpc_buffer_type_name) {
+    if (buft->iface.get_name != ggml_backend_rpc_buffer_type_name) {
         return false;
     }
     ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context;
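The core of the fix is an inverted guard: the old ggml_backend_rpc_supports_buft() returned false exactly when the buffer type WAS the RPC one, so the cast to ggml_backend_rpc_buffer_type_context just below was never reached for the one buffer type it is valid for. The corrected check rejects everything that is NOT the RPC buffer type, after which the context cast is safe. Below is a minimal standalone C sketch of this identify-by-get_name-pointer pattern; it is an illustration, not ggml's actual API, and all type and function names in it are hypothetical.

// Minimal standalone sketch of the supports_buft guard pattern.
// Buffer types are identified by comparing their get_name function
// pointer, mirroring the buft->iface.get_name check in ggml-rpc.cpp.
// All names here are hypothetical, not ggml's real API.
#include <stdbool.h>
#include <stdio.h>

typedef const char * (*get_name_fn)(void);

typedef struct {
    get_name_fn get_name; // identifies the buffer type, like buft->iface.get_name
    void * context;       // type-specific data, like buft->context
} buffer_type;

static const char * rpc_buffer_type_name(void) { return "RPC"; }
static const char * cpu_buffer_type_name(void) { return "CPU"; }

// Buggy version (pre-fix): the condition is inverted, so the function
// rejects precisely the buffer type it is supposed to support.
static bool supports_buft_buggy(const buffer_type * buft) {
    if (buft->get_name == rpc_buffer_type_name) {
        return false; // wrong: bails out on our own buffer type
    }
    // ...would go on to cast buft->context here, unsafely,
    // since buft is known NOT to be the RPC buffer type...
    return true;
}

// Fixed version: reject everything that is NOT the RPC buffer type;
// only then is it safe to interpret the type-specific context.
static bool supports_buft_fixed(const buffer_type * buft) {
    if (buft->get_name != rpc_buffer_type_name) {
        return false; // not ours: cannot interpret its context
    }
    // safe: buft->context is known to be RPC-specific here
    return true;
}

int main(void) {
    buffer_type rpc = { rpc_buffer_type_name, NULL };
    buffer_type cpu = { cpu_buffer_type_name, NULL };

    // buggy: rpc -> 0 (rejected), cpu -> 1 (wrongly accepted)
    printf("buggy(rpc)=%d buggy(cpu)=%d\n", supports_buft_buggy(&rpc), supports_buft_buggy(&cpu));
    // fixed: rpc -> 1, cpu -> 0
    printf("fixed(rpc)=%d fixed(cpu)=%d\n", supports_buft_fixed(&rpc), supports_buft_fixed(&cpu));
    return 0;
}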