From 8a39f579d88ee226365aa49518f509022dbd9f27 Mon Sep 17 00:00:00 2001
From: GralchemOz <68577430+GralchemOz@users.noreply.github.com>
Date: Mon, 1 Jul 2024 23:08:08 +0800
Subject: [PATCH] transformers: Add eager attention option to make Gemma-2 work properly (#6188)

---
 modules/loaders.py       | 1 +
 modules/models.py        | 3 +++
 modules/shared.py        | 1 +
 modules/ui.py            | 1 +
 modules/ui_model_menu.py | 1 +
 5 files changed, 7 insertions(+)

diff --git a/modules/loaders.py b/modules/loaders.py
index 5d3adacf..78601c17 100644
--- a/modules/loaders.py
+++ b/modules/loaders.py
@@ -21,6 +21,7 @@ loaders_and_params = OrderedDict({
         'trust_remote_code',
         'no_use_fast',
         'use_flash_attention_2',
+        'use_eager_attention',
         'alpha_value',
         'compress_pos_emb',
         'disable_exllama',
diff --git a/modules/models.py b/modules/models.py
index da741cb0..5db067dc 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -146,6 +146,9 @@ def huggingface_loader(model_name):
     if shared.args.force_safetensors:
         params['force_safetensors'] = True
 
+    if shared.args.use_eager_attention:
+        params['attn_implementation'] = 'eager'
+
     config = AutoConfig.from_pretrained(path_to_model, trust_remote_code=shared.args.trust_remote_code)
 
     if 'chatglm' in model_name.lower():
diff --git a/modules/shared.py b/modules/shared.py
index ebbfc268..e04c549a 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -106,6 +106,7 @@ group.add_argument('--trust-remote-code', action='store_true', help='Set trust_r
 group.add_argument('--force-safetensors', action='store_true', help='Set use_safetensors=True while loading the model. This prevents arbitrary code execution.')
 group.add_argument('--no_use_fast', action='store_true', help='Set use_fast=False while loading the tokenizer (it\'s True by default). Use this if you have any problems related to use_fast.')
 group.add_argument('--use_flash_attention_2', action='store_true', help='Set use_flash_attention_2=True while loading the model.')
+group.add_argument('--use_eager_attention', action='store_true', help='Set attn_implementation=eager while loading the model.')
 
 # bitsandbytes 4-bit
 group = parser.add_argument_group('bitsandbytes 4-bit')
diff --git a/modules/ui.py b/modules/ui.py
index b9267833..b1c1cf6d 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -69,6 +69,7 @@ def list_model_elements():
         'trust_remote_code',
         'no_use_fast',
         'use_flash_attention_2',
+        'use_eager_attention',
         'load_in_4bit',
         'compute_dtype',
         'quant_type',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 3ebcd126..df53c859 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -115,6 +115,7 @@ def create_ui():
                         shared.gradio['load_in_4bit'] = gr.Checkbox(label="load-in-4bit", value=shared.args.load_in_4bit)
                         shared.gradio['use_double_quant'] = gr.Checkbox(label="use_double_quant", value=shared.args.use_double_quant)
                         shared.gradio['use_flash_attention_2'] = gr.Checkbox(label="use_flash_attention_2", value=shared.args.use_flash_attention_2, info='Set use_flash_attention_2=True while loading the model.')
+                        shared.gradio['use_eager_attention'] = gr.Checkbox(label="use_eager_attention", value=shared.args.use_eager_attention, info='Set attn_implementation=eager while loading the model.')
                         shared.gradio['flash_attn'] = gr.Checkbox(label="flash_attn", value=shared.args.flash_attn, info='Use flash-attention.')
                         shared.gradio['auto_devices'] = gr.Checkbox(label="auto-devices", value=shared.args.auto_devices)
                         shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='NVIDIA only: use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards.')
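
For context, the new --use_eager_attention option simply forwards attn_implementation='eager' to transformers when the model is loaded. Gemma-2 uses attention logit soft-capping, which the SDPA and flash-attention kernels did not support at the time, so the eager implementation is needed for correct output. A minimal sketch of the equivalent direct transformers call, assuming a locally downloaded model (the path below is a placeholder, not part of the patch):

    # Sketch only: what params['attn_implementation'] = 'eager' amounts to.
    # "models/gemma-2-9b-it" is a placeholder model path.
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained(
        "models/gemma-2-9b-it",
        attn_implementation="eager",  # instead of the default "sdpa"
    )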