From 4ec42679e31924809d7734ea40da2f836bad4010 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 18 Aug 2023 07:58:20 -0700
Subject: [PATCH] Add --mul_mat_q param

---
 README.md                 | 1 +
 modules/llamacpp_hf.py    | 1 +
 modules/llamacpp_model.py | 1 +
 modules/shared.py         | 1 +
 modules/ui.py             | 1 +
 modules/ui_model_menu.py  | 1 +
 6 files changed, 6 insertions(+)

diff --git a/README.md b/README.md
index 4607ab5b..5b1e95c3 100644
--- a/README.md
+++ b/README.md
@@ -261,6 +261,7 @@ Optionally, you can use the following command-line flags:
 |-------------|-------------|
 | `--no-mmap` | Prevent mmap from being used. |
 | `--mlock` | Force the system to keep the model in RAM. |
+| `--mul_mat_q` | Activate new mulmat kernels. |
 | `--cache-capacity CACHE_CAPACITY` | Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed. |
 | `--tensor_split TENSOR_SPLIT` | Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17 |
 | `--llama_cpp_seed SEED` | Seed for llama-cpp models. Default 0 (random). |
diff --git a/modules/llamacpp_hf.py b/modules/llamacpp_hf.py
index 7deae98a..10c30112 100644
--- a/modules/llamacpp_hf.py
+++ b/modules/llamacpp_hf.py
@@ -116,6 +116,7 @@ class LlamacppHF(PreTrainedModel):
             'n_batch': shared.args.n_batch,
             'use_mmap': not shared.args.no_mmap,
             'use_mlock': shared.args.mlock,
+            'mul_mat_q': shared.args.mul_mat_q,
             'low_vram': shared.args.low_vram,
             'n_gpu_layers': shared.args.n_gpu_layers,
             'rope_freq_base': 10000 * shared.args.alpha_value ** (64 / 63.),
diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index 0e635da4..28a38de6 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -69,6 +69,7 @@ class LlamaCppModel:
             'n_batch': shared.args.n_batch,
             'use_mmap': not shared.args.no_mmap,
             'use_mlock': shared.args.mlock,
+            'mul_mat_q': shared.args.mul_mat_q,
             'low_vram': shared.args.low_vram,
             'n_gpu_layers': shared.args.n_gpu_layers,
             'rope_freq_base': 10000 * shared.args.alpha_value ** (64 / 63.),
diff --git a/modules/shared.py b/modules/shared.py
index 23eb3983..385b99da 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -119,6 +119,7 @@ parser.add_argument('--n_batch', type=int, default=512, help='Maximum number of
 parser.add_argument('--no-mmap', action='store_true', help='Prevent mmap from being used.')
 parser.add_argument('--low-vram', action='store_true', help='Low VRAM Mode')
 parser.add_argument('--mlock', action='store_true', help='Force the system to keep the model in RAM.')
+parser.add_argument('--mul_mat_q', action='store_true', help='Activate new mulmat kernels.')
 parser.add_argument('--cache-capacity', type=str, help='Maximum cache capacity. Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.')
 parser.add_argument('--n-gpu-layers', type=int, default=0, help='Number of layers to offload to the GPU.')
 parser.add_argument('--tensor_split', type=str, default=None, help="Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17")
diff --git a/modules/ui.py b/modules/ui.py
index ea1ca74b..15f24d85 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -68,6 +68,7 @@ def list_model_elements():
         'no_mmap',
         'low_vram',
         'mlock',
+        'mul_mat_q',
         'n_gpu_layers',
         'tensor_split',
         'n_ctx',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 21507530..8e24ebdf 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -110,6 +110,7 @@ def create_ui():
             shared.gradio['no_mmap'] = gr.Checkbox(label="no-mmap", value=shared.args.no_mmap)
             shared.gradio['low_vram'] = gr.Checkbox(label="low-vram", value=shared.args.low_vram)
             shared.gradio['mlock'] = gr.Checkbox(label="mlock", value=shared.args.mlock)
+            shared.gradio['mul_mat_q'] = gr.Checkbox(label="mul_mat_q", value=shared.args.mul_mat_q)
             shared.gradio['tensor_split'] = gr.Textbox(label='tensor_split', info='Split the model across multiple GPUs, comma-separated list of proportions, e.g. 18,17')
             shared.gradio['llama_cpp_seed'] = gr.Number(label='Seed (0 for random)', value=shared.args.llama_cpp_seed)
             shared.gradio['trust_remote_code'] = gr.Checkbox(label="trust-remote-code", value=shared.args.trust_remote_code, info='Make sure to inspect the .py files inside the model folder before loading it with this option enabled.')