From 031fe7225e91a2869647bd12f240e1ea902e30db Mon Sep 17 00:00:00 2001
From: Shouyi <198784@gmail.com>
Date: Wed, 26 Jul 2023 07:59:26 +1000
Subject: [PATCH] Add tensor split support for llama.cpp (#3171)

---
 README.md                 | 1 +
 modules/llamacpp_hf.py    | 7 +++++++
 modules/llamacpp_model.py | 7 +++++++
 modules/loaders.py        | 2 ++
 modules/shared.py         | 1 +
 modules/ui.py             | 1 +
 server.py                 | 1 +
 7 files changed, 20 insertions(+)

diff --git a/README.md b/README.md
index 456da4ef..aac6039c 100644
--- a/README.md
+++ b/README.md
@@ -218,6 +218,7 @@ Optionally, you can use the following command-line flags:
 | `--mlock` | Force the system to keep the model in RAM. |
 | `--cache-capacity CACHE_CAPACITY` | Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed. |
 | `--n-gpu-layers N_GPU_LAYERS` | Number of layers to offload to the GPU. Only works if llama-cpp-python was compiled with BLAS. Set this to 1000000000 to offload all layers to the GPU. |
+| `--tensor_split TENSOR_SPLIT` | Split the model across multiple GPUs. Comma-separated list of proportions. Example: 18,17 |
 | `--n_ctx N_CTX` | Size of the prompt context. |
 | `--llama_cpp_seed SEED` | Seed for llama-cpp models. Default 0 (random). |
 | `--n_gqa N_GQA` | grouped-query attention. Must be 8 for llama2 70b. |
diff --git a/modules/llamacpp_hf.py b/modules/llamacpp_hf.py
index 6a4a0294..5c8c1a7a 100644
--- a/modules/llamacpp_hf.py
+++ b/modules/llamacpp_hf.py
@@ -94,6 +94,12 @@ class LlamacppHF(PreTrainedModel):
             model_file = list(path.glob('*ggml*.bin'))[0]
 
         logger.info(f"llama.cpp weights detected: {model_file}\n")
+
+        if shared.args.tensor_split is None or shared.args.tensor_split.strip() == '':
+            tensor_split_list = None
+        else:
+            tensor_split_list = [float(x) for x in shared.args.tensor_split.strip().split(",")]
+
         params = {
             'model_path': str(model_file),
             'n_ctx': shared.args.n_ctx,
@@ -104,6 +110,7 @@ class LlamacppHF(PreTrainedModel):
             'use_mlock': shared.args.mlock,
             'low_vram': shared.args.low_vram,
             'n_gpu_layers': shared.args.n_gpu_layers,
+            'tensor_split': tensor_split_list,
             'rope_freq_base': 10000 * shared.args.alpha_value ** (64/63.),
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
             'n_gqa': shared.args.n_gqa or None,
diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index 0f9c3470..69e1c160 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -41,6 +41,12 @@ class LlamaCppModel:
                 cache_capacity = int(shared.args.cache_capacity)
 
         logger.info("Cache capacity is " + str(cache_capacity) + " bytes")
+
+        if shared.args.tensor_split is None or shared.args.tensor_split.strip() == '':
+            tensor_split_list = None
+        else:
+            tensor_split_list = [float(x) for x in shared.args.tensor_split.strip().split(",")]
+
         params = {
             'model_path': str(path),
             'n_ctx': shared.args.n_ctx,
@@ -51,6 +57,7 @@ class LlamaCppModel:
             'use_mlock': shared.args.mlock,
             'low_vram': shared.args.low_vram,
             'n_gpu_layers': shared.args.n_gpu_layers,
+            'tensor_split': tensor_split_list,
             'rope_freq_base': 10000 * shared.args.alpha_value ** (64/63.),
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
             'n_gqa': shared.args.n_gqa or None,
diff --git a/modules/loaders.py b/modules/loaders.py
index c55cf0ff..acb59c65 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -33,6 +33,7 @@ loaders_and_params = {
         'n_gqa',
         'rms_norm_eps',
         'n_gpu_layers',
+        'tensor_split',
         'n_batch',
         'threads',
         'no_mmap',
@@ -47,6 +48,7 @@
         'n_gqa',
         'rms_norm_eps',
         'n_gpu_layers',
+        'tensor_split',
         'n_batch',
         'threads',
         'no_mmap',
diff --git a/modules/shared.py b/modules/shared.py
index 614f78b7..f45a5683 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -125,6 +125,7 @@ parser.add_argument('--low-vram', action='store_true', help='Low VRAM Mode')
 parser.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
 parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
 parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
+parser.add_argument('--tensor_split', type=str, default=None, help='Split the model across multiple GPUs. Comma-separated list of proportions. Example: 18,17')
 parser.add_argument('--n_ctx', type=int, default=2048, help='Size of the prompt context.')
 parser.add_argument('--llama_cpp_seed', type=int, default=0, help='Seed for llama-cpp models. Default 0 (random)')
 parser.add_argument('--n_gqa', type=int, default=0, help='grouped-query attention. Must be 8 for llama2 70b.')
diff --git a/modules/ui.py b/modules/ui.py
index 0e6cf0b1..b2a60f74 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -60,6 +60,7 @@ def list_model_elements():
         'low_vram',
         'mlock',
         'n_gpu_layers',
+        'tensor_split',
         'n_ctx',
         'n_gqa',
         'rms_norm_eps',
diff --git a/server.py b/server.py
index 9c86d2dc..5157de0d 100644
--- a/server.py
+++ b/server.py
@@ -227,6 +227,7 @@ def create_model_menus():
                             shared.gradio['pre_layer'] = gr.Slider(label="pre_layer", minimum=0, maximum=100, value=shared.args.pre_layer[0] if shared.args.pre_layer is not None else 0)
                             shared.gradio['autogptq_info'] = gr.Markdown('* ExLlama_HF is recommended over AutoGPTQ for models derived from LLaMA.')
                             shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
+                            shared.gradio['tensor_split'] = gr.Textbox(label='tensor_split', info='Split the model across multiple GPUs. Comma-separated list of proportions. Example: 18,17')
                             shared.gradio['max_seq_len'] = gr.Slider(label='max_seq_len', minimum=2048, maximum=16384, step=256, info='Maximum sequence length.', value=shared.args.max_seq_len)
                             shared.gradio['compress_pos_emb'] = gr.Slider(label='compress_pos_emb', minimum=1, maximum=8, step=1, info='Positional embeddings compression factor. Should typically be set to max_seq_len / 2048.', value=shared.args.compress_pos_emb)
                             shared.gradio['alpha_value'] = gr.Slider(label='alpha_value', minimum=1, maximum=32, step=1, info='Positional embeddings alpha factor for NTK RoPE scaling. Scaling is not identical to embedding compression. Use either this or compress_pos_emb, not both.', value=shared.args.alpha_value)
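
For reference, a minimal sketch of how the new flag flows into llama-cpp-python. The parsing mirrors the hunk added to modules/llamacpp_model.py above; `tensor_split` is an existing parameter of llama-cpp-python's `Llama` constructor, while the model path and split proportions here are illustrative placeholders only:

    from llama_cpp import Llama

    def parse_tensor_split(arg):
        # Same logic as the patch: None or an empty string means "no split",
        # otherwise "18,17" becomes [18.0, 17.0].
        if arg is None or arg.strip() == '':
            return None
        return [float(x) for x in arg.strip().split(",")]

    llm = Llama(
        model_path='models/llama-2-70b.ggmlv3.q4_K_M.bin',  # placeholder path
        n_gpu_layers=1000000000,                    # offload all layers to GPU
        tensor_split=parse_tensor_split('18,17'),   # ~18/35 on GPU 0, ~17/35 on GPU 1
    )

    # Equivalent invocation through the web UI's command line:
    #   python server.py --n-gpu-layers 1000000000 --tensor_split 18,17

Since the values are proportions rather than absolute sizes, 18,17 and 36,34 describe the same split; uneven ratios are useful when one GPU also holds the desktop or KV cache.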