From 1d79aa67cf8bbaa162dbe4d2950c52c748fe0446 Mon Sep 17 00:00:00 2001
From: Forkoz <59298527+Ph0rk0z@users.noreply.github.com>
Date: Thu, 13 Jun 2024 03:34:54 +0000
Subject: [PATCH] Fix flash-attn UI parameter to actually store true. (#6076)

---
 modules/loaders.py       | 4 ++--
 modules/ui.py            | 2 +-
 modules/ui_model_menu.py | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/loaders.py b/modules/loaders.py
index cd9d0f88..5099ffb0 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -46,7 +46,7 @@ loaders_and_params = OrderedDict({
         'no_offload_kqv',
         'row_split',
         'tensorcores',
-        'flash-attn',
+        'flash_attn',
         'streaming_llm',
         'attention_sink_size',
     ],
@@ -72,7 +72,7 @@ loaders_and_params = OrderedDict({
         'no_offload_kqv',
         'row_split',
         'tensorcores',
-        'flash-attn',
+        'flash_attn',
         'streaming_llm',
         'attention_sink_size',
         'llamacpp_HF_info',
diff --git a/modules/ui.py b/modules/ui.py
index a202d325..f88c0a82 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -102,7 +102,7 @@ def list_model_elements():
         'no_offload_kqv',
         'row_split',
         'tensorcores',
-        'flash-attn',
+        'flash_attn',
         'streaming_llm',
         'attention_sink_size',
         'hqq_backend',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index d240bb6a..d8b53b11 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -115,7 +115,7 @@ def create_ui():
                             shared.gradio['load_in_4bit'] = gr.Checkbox(label="load-in-4bit", value=shared.args.load_in_4bit)
                             shared.gradio['use_double_quant'] = gr.Checkbox(label="use_double_quant", value=shared.args.use_double_quant)
                             shared.gradio['use_flash_attention_2'] = gr.Checkbox(label="use_flash_attention_2", value=shared.args.use_flash_attention_2, info='Set use_flash_attention_2=True while loading the model.')
-                            shared.gradio['flash-attn'] = gr.Checkbox(label="flash-attn", value=shared.args.flash_attn, info='Use flash-attention.')
+                            shared.gradio['flash_attn'] = gr.Checkbox(label="flash_attn", value=shared.args.flash_attn, info='Use flash-attention.')
                             shared.gradio['auto_devices'] = gr.Checkbox(label="auto-devices", value=shared.args.auto_devices)
                             shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='NVIDIA only: use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards.')
                             shared.gradio['streaming_llm'] = gr.Checkbox(label="streaming_llm", value=shared.args.streaming_llm, info='(experimental) Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')