From 3df7e151f7c3a7e02a2831fe1fcd7cd15881bc91 Mon Sep 17 00:00:00 2001
From: Badis Ghoubali <110173477+BadisG@users.noreply.github.com>
Date: Sun, 4 Feb 2024 22:15:30 +0100
Subject: [PATCH 1/3] fix the n_batch slider (#5436)

---
 modules/ui_model_menu.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index ebed4aa0..12da92d4 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -90,7 +90,7 @@ def create_ui():
             shared.gradio['n_ctx'] = gr.Slider(minimum=0, maximum=shared.settings['truncation_length_max'], step=256, label="n_ctx", value=shared.args.n_ctx, info='Context length. Try lowering this if you run out of memory while loading the model.')
             shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=32, value=shared.args.threads)
             shared.gradio['threads_batch'] = gr.Slider(label="threads_batch", minimum=0, step=1, maximum=32, value=shared.args.threads_batch)
-            shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, value=shared.args.n_batch)
+            shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, step=1, value=shared.args.n_batch)
             shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=shared.args.wbits if shared.args.wbits > 0 else "None")
             shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=shared.args.groupsize if shared.args.groupsize > 0 else "None")

From 2a45620c851e6b4244697b1901de523392b7b6a5 Mon Sep 17 00:00:00 2001
From: Forkoz <59298527+Ph0rk0z@users.noreply.github.com>
Date: Mon, 5 Feb 2024 02:36:40 +0000
Subject: [PATCH 2/3] Split by rows instead of layers for llama.cpp multi-gpu (#5435)

---
 modules/llamacpp_hf.py    | 3 ++-
 modules/llamacpp_model.py | 3 ++-
 modules/loaders.py        | 2 ++
 modules/shared.py         | 1 +
 modules/ui.py             | 1 +
 modules/ui_model_menu.py  | 1 +
 6 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/modules/llamacpp_hf.py b/modules/llamacpp_hf.py
index d491c463..4726669b 100644
--- a/modules/llamacpp_hf.py
+++ b/modules/llamacpp_hf.py
@@ -216,7 +216,8 @@ class LlamacppHF(PreTrainedModel):
             'tensor_split': tensor_split_list,
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
             'logits_all': shared.args.logits_all,
-            'offload_kqv': not shared.args.no_offload_kqv
+            'offload_kqv': not shared.args.no_offload_kqv,
+            'split_mode': 1 if not shared.args.row_split else 2
         }

         Llama = llama_cpp_lib().Llama
diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index 96ea98e9..7c405a4b 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -95,7 +95,8 @@ class LlamaCppModel:
             'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base),
             'tensor_split': tensor_split_list,
             'rope_freq_scale': 1.0 / shared.args.compress_pos_emb,
-            'offload_kqv': not shared.args.no_offload_kqv
+            'offload_kqv': not shared.args.no_offload_kqv,
+            'split_mode': 1 if not shared.args.row_split else 2
         }

         result.model = Llama(**params)
diff --git a/modules/loaders.py b/modules/loaders.py
index 5b39c379..a0104e90 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -44,6 +44,7 @@ loaders_and_params = OrderedDict({
         'cpu',
         'numa',
         'no_offload_kqv',
+        'row_split',
         'tensorcores',
     ],
     'llamacpp_HF': [
@@ -66,6 +67,7 @@ loaders_and_params = OrderedDict({
         'no_use_fast',
         'logits_all',
         'no_offload_kqv',
+        'row_split',
         'tensorcores',
         'llamacpp_HF_info',
     ],
diff --git a/modules/shared.py b/modules/shared.py
index cc8b9e5d..38d08349 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -129,6 +129,7 @@ group.add_argument('--numa', action='store_true', help='Activate NUMA task alloc
 group.add_argument('--logits_all', action='store_true', help='Needs to be set for perplexity evaluation to work. Otherwise, ignore it, as it makes prompt processing slower.')
 group.add_argument('--no_offload_kqv', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
 group.add_argument('--cache-capacity', type=str, help='Maximum cache capacity (llama-cpp-python). Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
+group.add_argument('--row_split', action='store_true', help='Split multi-gpu by row instead of layer. Faster on some cards.')

 # ExLlamaV2
 group = parser.add_argument_group('ExLlamaV2')
diff --git a/modules/ui.py b/modules/ui.py
index 53a8fd14..acd959a0 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -93,6 +93,7 @@ def list_model_elements():
         'numa',
         'logits_all',
         'no_offload_kqv',
+        'row_split',
         'tensorcores',
         'hqq_backend',
     ]
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 12da92d4..f03d45c9 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -107,6 +107,7 @@ def create_ui():
         with gr.Column():
             shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='Use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards. NVIDIA only.')
             shared.gradio['no_offload_kqv'] = gr.Checkbox(label="no_offload_kqv", value=shared.args.no_offload_kqv, info='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.')
+            shared.gradio['row_split'] = gr.Checkbox(label="row_split", value=shared.args.row_split, info='Split model by rows across GPUs. Improves performance on some cards.')
             shared.gradio['triton'] = gr.Checkbox(label="triton", value=shared.args.triton)
             shared.gradio['no_inject_fused_attention'] = gr.Checkbox(label="no_inject_fused_attention", value=shared.args.no_inject_fused_attention, info='Disable fused attention. Fused attention improves inference performance but uses more VRAM. Fuses layers for AutoAWQ. Disable if running low on VRAM.')
             shared.gradio['no_inject_fused_mlp'] = gr.Checkbox(label="no_inject_fused_mlp", value=shared.args.no_inject_fused_mlp, info='Affects Triton only. Disable fused MLP. Fused MLP improves performance but uses more VRAM. Disable if running low on VRAM.')

From 9fdee65cf50923be9cf36b50bc6a7140c7f2f2d0 Mon Sep 17 00:00:00 2001
From: Badis Ghoubali <110173477+BadisG@users.noreply.github.com>
Date: Mon, 5 Feb 2024 03:39:15 +0100
Subject: [PATCH 3/3] Improve ChatML template (#5411)

---
 instruction-templates/ChatML.yaml | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/instruction-templates/ChatML.yaml b/instruction-templates/ChatML.yaml
index e9f2883f..8b55f0dc 100644
--- a/instruction-templates/ChatML.yaml
+++ b/instruction-templates/ChatML.yaml
@@ -5,15 +5,12 @@ instruction_template: |-
           {%- set ns.found = true -%}
       {%- endif -%}
   {%- endfor -%}
-  {%- if not ns.found -%}
-      {{- '<|im_start|>system\n' + '' + '<|im_end|>\n' -}}
-  {%- endif %}
   {%- for message in messages %}
       {%- if message['role'] == 'system' -%}
-          {{- '<|im_start|>system\n' + message['content'] + '<|im_end|>\n' -}}
+          {{- '<|im_start|>system\n' + message['content'].rstrip() + '<|im_end|>\n' -}}
       {%- else -%}
           {%- if message['role'] == 'user' -%}
-              {{-'<|im_start|>user\n' + message['content'] + '<|im_end|>\n'-}}
+              {{-'<|im_start|>user\n' + message['content'].rstrip() + '<|im_end|>\n'-}}
           {%- else -%}
               {{-'<|im_start|>assistant\n' + message['content'] + '<|im_end|>\n' -}}
           {%- endif -%}
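
Note on PATCH 2/3: the patch passes 'split_mode': 1 if not shared.args.row_split else 2 straight through to the llama-cpp-python Llama constructor. As context, below is a minimal standalone sketch (not part of the patches) of that mapping. It assumes llama.cpp's llama_split_mode enum values at the time (1 = split by whole layers, 2 = split tensors by rows); the constructor arguments shown are standard llama-cpp-python ones, but verify them against the installed version.

    # Illustrative sketch only -- mirrors the loader logic added in PATCH 2/3.
    # Assumes llama.cpp's llama_split_mode enum: 1 = layer split, 2 = row split.
    from llama_cpp import Llama

    LLAMA_SPLIT_LAYER = 1  # default: whole layers are distributed across GPUs
    LLAMA_SPLIT_ROW = 2    # tensors are split by rows across GPUs (--row_split)

    def load_gguf(model_path: str, row_split: bool = False) -> Llama:
        """Pick the multi-GPU split strategy from a boolean flag, as the patch does."""
        return Llama(
            model_path=model_path,
            n_gpu_layers=-1,  # offload all layers; lower this if VRAM is tight
            split_mode=LLAMA_SPLIT_ROW if row_split else LLAMA_SPLIT_LAYER,
        )

Row split tends to help on setups where per-GPU compute, rather than interconnect bandwidth, is the bottleneck, which is why the UI exposes it as an opt-in checkbox rather than changing the default.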