#pragma once

#include "ggml.h"

#include "ggml-backend.h"

#ifdef __cplusplus
extern "C" {
#endif

#define GGML_RPC_MAX_SERVERS 16

// backend API

// create an RPC backend connected to the server at the given endpoint ("host:port")
GGML_API ggml_backend_t ggml_backend_rpc_init(const char * endpoint);
// check whether a backend is an RPC backend
GGML_API bool ggml_backend_is_rpc(ggml_backend_t backend);

// buffer type for allocating tensors on the server at the given endpoint
GGML_API ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint);

// query the free and total memory of the device behind the given endpoint
GGML_API void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total);

// serve the given local backend on the given endpoint, advertising free_mem/total_mem to clients
GGML_API void ggml_backend_rpc_start_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem);

// backend registry entry for the RPC backend
GGML_API ggml_backend_reg_t ggml_backend_rpc_reg(void);

// register the server at the given endpoint as a device and return it
GGML_API ggml_backend_dev_t ggml_backend_rpc_add_device(const char * endpoint);

#ifdef __cplusplus
}
#endif
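
Below is a minimal client-side sketch of how these declarations fit together, assuming an RPC server is already listening and that the endpoint string 127.0.0.1:50052 is purely illustrative; ggml_backend_free comes from ggml-backend.h. ggml_backend_rpc_add_device can likewise register the same endpoint as a device in the backend registry.

#include <stdio.h>

#include "ggml-backend.h"
#include "ggml-rpc.h"

int main(void) {
    // hypothetical endpoint; an RPC server must already be listening here
    const char * endpoint = "127.0.0.1:50052";

    // connect to the remote server
    ggml_backend_t backend = ggml_backend_rpc_init(endpoint);
    if (backend == NULL) {
        fprintf(stderr, "failed to connect to %s\n", endpoint);
        return 1;
    }
    printf("is_rpc: %d\n", ggml_backend_is_rpc(backend)); // prints 1

    // query the memory of the remote device
    size_t free_mem = 0, total_mem = 0;
    ggml_backend_rpc_get_device_memory(endpoint, &free_mem, &total_mem);
    printf("remote memory: %zu free / %zu total\n", free_mem, total_mem);

    ggml_backend_free(backend); // from ggml-backend.h
    return 0;
}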
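
On the server side, a sketch under similar assumptions: the CPU backend via ggml_backend_cpu_init stands in for any local backend, and the advertised memory figures are placeholders rather than measured values.

#include <stdio.h>

#include "ggml-backend.h"
#include "ggml-rpc.h"

int main(void) {
    // local backend to expose over RPC; CPU is an assumption here,
    // any ggml backend could be served the same way
    ggml_backend_t backend = ggml_backend_cpu_init();
    if (backend == NULL) {
        fprintf(stderr, "failed to initialize local backend\n");
        return 1;
    }

    // placeholder memory amounts advertised to clients
    size_t free_mem  =  8ull * 1024 * 1024 * 1024; //  8 GiB
    size_t total_mem = 16ull * 1024 * 1024 * 1024; // 16 GiB

    // serve the backend on all interfaces; runs the server loop
    ggml_backend_rpc_start_server(backend, "0.0.0.0:50052", free_mem, total_mem);

    ggml_backend_free(backend);
    return 0;
}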