diff --git a/examples/server/README.md b/examples/server/README.md
index e56ca063a..1559dd3f2 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -16,6 +16,7 @@ Command line options:
 - `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. Not recommended.
 - `--mlock`: Lock the model in memory, preventing it from being swapped out when memory-mapped.
 - `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed.
+- `--numa`: Attempt optimizations that help on some NUMA systems.
 - `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
 - `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.
 - `-to N`, `--timeout N`: Server read/write timeout in seconds. Default `600`.
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 2340f93ac..222dbcb43 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -666,6 +666,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     {
         fprintf(stdout, "  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
     }
+    fprintf(stdout, "  --numa                attempt optimizations that help on some NUMA systems\n");
 #ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
     fprintf(stdout, "  -ngl N, --n-gpu-layers N\n");
     fprintf(stdout, "                        number of layers to store in VRAM\n");
@@ -940,6 +941,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
         {
             params.use_mmap = false;
         }
+        else if (arg == "--numa")
+        {
+            params.numa = true;
+        }
         else if (arg == "--embedding")
         {
             params.embedding = true;
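
For context, a minimal sketch of how the parsed flag is typically consumed. The diff only sets `params.numa`; the sketch assumes the `llama_backend_init(bool numa)` / `llama_backend_free()` entry points of this era and a hypothetical helper name, and is not part of the patch itself.

```cpp
// Sketch only (not from this diff): after server_params_parse() handles
// --numa, the value in gpt_params::numa is forwarded to the backend so it
// can attempt NUMA-aware thread/memory placement.
#include "llama.h"

// `numa_requested` stands in for params.numa after argument parsing.
void init_backend_example(bool numa_requested)
{
    // Assumed consumption point: llama_backend_init takes the NUMA hint.
    llama_backend_init(numa_requested);

    // ... load the model, create contexts, run the server loop ...

    llama_backend_free();
}
```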