diff --git a/README.md b/README.md
index 0dd92c42..05fb9806 100644
--- a/README.md
+++ b/README.md
@@ -249,8 +249,10 @@ Optionally, you can use the following command-line flags:
 
 | Flag | Description |
 |------------------|-------------|
-| `--triton` | Use triton. |
-| `--desc_act` | For models that don't have a quantize_config.json, this parameter is used to define whether to set desc_act or not in BaseQuantizeConfig. |
+| `--triton` | Use triton. |
+| `--no_inject_fused_attention` | Disable the use of fused attention, which will use less VRAM at the cost of slower inference. |
+| `--no_inject_fused_mlp` | Triton mode only: disable the use of fused MLP, which will use less VRAM at the cost of slower inference. |
+| `--desc_act` | For models that don't have a quantize_config.json, this parameter is used to define whether to set desc_act or not in BaseQuantizeConfig. |
 
 #### GPTQ-for-LLaMa
diff --git a/modules/AutoGPTQ_loader.py b/modules/AutoGPTQ_loader.py
index 8cb078af..1bbd33f0 100644
--- a/modules/AutoGPTQ_loader.py
+++ b/modules/AutoGPTQ_loader.py
@@ -43,6 +43,8 @@ def load_quantized(model_name):
         'model_basename': pt_path.stem,
         'device': "cuda:0" if not shared.args.cpu else "cpu",
         'use_triton': shared.args.triton,
+        'inject_fused_attention': not shared.args.no_inject_fused_attention,
+        'inject_fused_mlp': not shared.args.no_inject_fused_mlp,
         'use_safetensors': use_safetensors,
         'trust_remote_code': shared.args.trust_remote_code,
         'max_memory': get_max_memory_dict(),
diff --git a/modules/shared.py b/modules/shared.py
index 9f4f720c..865e062b 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -142,6 +142,8 @@ parser.add_argument('--fused_mlp', action='store_true', help='(triton) Enable fu
 parser.add_argument('--gptq-for-llama', action='store_true', help='Use GPTQ-for-LLaMa to load the GPTQ model instead of AutoGPTQ.')
 parser.add_argument('--autogptq', action='store_true', help='DEPRECATED')
 parser.add_argument('--triton', action='store_true', help='Use triton.')
+parser.add_argument('--no_inject_fused_attention', action='store_true', help='Do not use fused attention (lowers VRAM requirements).')
+parser.add_argument('--no_inject_fused_mlp', action='store_true', help='Triton mode only: Do not use fused MLP (lowers VRAM requirements).')
 parser.add_argument('--desc_act', action='store_true', help='For models that don\'t have a quantize_config.json, this parameter is used to define whether to set desc_act or not in BaseQuantizeConfig.')
 
 # FlexGen
diff --git a/modules/ui.py b/modules/ui.py
index d930ee28..d39bae72 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -30,7 +30,7 @@ theme = gr.themes.Default(
 
 def list_model_elements():
-    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'trust_remote_code', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'gptq_for_llama', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'triton', 'desc_act', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed']
+    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'trust_remote_code', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'gptq_for_llama', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'triton', 'desc_act', 'no_inject_fused_attention', 'no_inject_fused_mlp', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed']
     for i in range(torch.cuda.device_count()):
         elements.append(f'gpu_memory_{i}')
diff --git a/server.py b/server.py
index e3149a19..4fd4ee21 100644
--- a/server.py
+++ b/server.py
@@ -321,6 +321,8 @@ def create_model_menus():
             with gr.Column():
                 gr.Markdown('GPTQ')
                 shared.gradio['triton'] = gr.Checkbox(label="triton", value=shared.args.triton)
+                shared.gradio['no_inject_fused_attention'] = gr.Checkbox(label="no_inject_fused_attention", value=shared.args.no_inject_fused_attention, info='Disable fused attention. Fused attention improves inference performance but uses more VRAM. Disable if running low on VRAM.')
+                shared.gradio['no_inject_fused_mlp'] = gr.Checkbox(label="no_inject_fused_mlp", value=shared.args.no_inject_fused_mlp, info='Affects Triton only. Disable fused MLP. Fused MLP improves performance but uses more VRAM. Disable if running low on VRAM.')
                 shared.gradio['desc_act'] = gr.Checkbox(label="desc_act", value=shared.args.desc_act, info='\'desc_act\', \'wbits\', and \'groupsize\' are used for old models without a quantize_config.json.')
                 shared.gradio['gptq_for_llama'] = gr.Checkbox(label="gptq-for-llama", value=shared.args.gptq_for_llama, info='Use GPTQ-for-LLaMa loader instead of AutoGPTQ. pre_layer should be used for CPU offloading instead of gpu-memory.')
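
For context, the two new flags simply invert the corresponding `inject_fused_*` loader parameters in the `params` dict built in `modules/AutoGPTQ_loader.py`. Below is a minimal sketch of what that amounts to at the AutoGPTQ level, assuming the dict is forwarded to `AutoGPTQForCausalLM.from_quantized()` (the hunk above does not show the call itself) and using a placeholder model path:

```python
# Sketch only: the effect of --no_inject_fused_attention / --no_inject_fused_mlp
# expressed as a direct AutoGPTQ load. The model path is a placeholder, and the
# exact call site in modules/AutoGPTQ_loader.py is assumed, not shown in the diff.
from auto_gptq import AutoGPTQForCausalLM

model = AutoGPTQForCausalLM.from_quantized(
    "models/some-gptq-model",       # placeholder directory containing a quantized checkpoint
    use_triton=True,                # --triton
    inject_fused_attention=False,   # --no_inject_fused_attention: less VRAM, slower inference
    inject_fused_mlp=False,         # --no_inject_fused_mlp (Triton only): less VRAM, slower inference
    use_safetensors=True,           # set to match the checkpoint format
)
```

From the command line, the same switches would be passed as `python server.py --triton --no_inject_fused_attention --no_inject_fused_mlp`.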