Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-11-22 08:07:56 +01:00)

Add --no_xformers and --no_sdpa flags for ExllamaV2

commit e436d69e2b
parent 512b311137
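In short, the change threads two new attention-backend switches, --no_xformers and --no_sdpa, through the whole stack, mirroring the existing --no_flash_attn option: the ExLlamav2 and ExLlamav2_HF loaders copy the values onto ExLlamaV2Config, the loader and UI tables expose them as checkboxes in the model tab, and the argument parser defines the command-line flags. Assuming the usual python server.py entry point, they are passed like any other boolean flag, e.g. python server.py --no_xformers --no_sdpa.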
@@ -48,6 +48,8 @@ class Exllamav2Model:
         config.scale_pos_emb = shared.args.compress_pos_emb
         config.scale_alpha_value = shared.args.alpha_value
         config.no_flash_attn = shared.args.no_flash_attn
+        config.no_xformers = shared.args.no_xformers
+        config.no_sdpa = shared.args.no_sdpa
         config.num_experts_per_token = int(shared.args.num_experts_per_token)
 
         model = ExLlamaV2(config)
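This first hunk (apparently modules/exllamav2.py) sets the toggles as plain boolean attributes on ExLlamaV2Config right before the model object is built. A minimal sketch of the same pattern outside the webui, assuming the exllamav2 package; the model path and flag values below are illustrative, not taken from the commit:

    from exllamav2 import ExLlamaV2, ExLlamaV2Config

    # Illustrative stand-ins for the shared.args values used in the webui.
    no_flash_attn, no_xformers, no_sdpa = False, True, False

    config = ExLlamaV2Config()
    config.model_dir = "/path/to/exl2-model"  # hypothetical path
    config.prepare()  # reads the model's config.json from model_dir

    # Attention-backend toggles, mirroring the lines added in this commit;
    # disabling a backend makes exllamav2 fall back to the remaining ones.
    config.no_flash_attn = no_flash_attn
    config.no_xformers = no_xformers
    config.no_sdpa = no_sdpa

    model = ExLlamaV2(config)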
@@ -176,6 +176,8 @@ class Exllamav2HF(PreTrainedModel):
         config.scale_pos_emb = shared.args.compress_pos_emb
         config.scale_alpha_value = shared.args.alpha_value
         config.no_flash_attn = shared.args.no_flash_attn
+        config.no_xformers = shared.args.no_xformers
+        config.no_sdpa = shared.args.no_sdpa
         config.num_experts_per_token = int(shared.args.num_experts_per_token)
 
         return Exllamav2HF(config)
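The same three assignments are repeated for the HF-compatible wrapper (apparently Exllamav2HF.from_pretrained in modules/exllamav2_hf.py), so both ExLlamav2 loader variants honor the new flags identically.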
@@ -84,6 +84,8 @@ loaders_and_params = OrderedDict({
         'max_seq_len',
         'cfg_cache',
         'no_flash_attn',
+        'no_xformers',
+        'no_sdpa',
         'num_experts_per_token',
         'cache_8bit',
         'cache_4bit',
@@ -97,6 +99,8 @@ loaders_and_params = OrderedDict({
         'gpu_split',
         'max_seq_len',
         'no_flash_attn',
+        'no_xformers',
+        'no_sdpa',
         'num_experts_per_token',
         'cache_8bit',
         'cache_4bit',
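These two lists look like the parameter sets of the ExLlamav2_HF and ExLlamav2 entries in loaders_and_params (modules/loaders.py), which maps each loader name to the model-tab elements shown when that loader is selected; adding the names here is what makes the new checkboxes appear for these loaders. An abridged, illustrative sketch of the structure, with the loader key and surrounding entries assumed rather than visible in the hunks:

    from collections import OrderedDict

    # Each key is a loader name; each value lists the UI element names
    # that become visible when that loader is selected in the model tab.
    loaders_and_params = OrderedDict({
        'ExLlamav2_HF': [
            'gpu_split',
            'max_seq_len',
            'cfg_cache',
            'no_flash_attn',
            'no_xformers',
            'no_sdpa',
            'num_experts_per_token',
            'cache_8bit',
            'cache_4bit',
            # ...
        ],
        # 'ExLlamav2': a similar list without the HF-specific entries
    })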
@@ -143,6 +143,8 @@ group.add_argument('--autosplit', action='store_true', help='Autosplit the model
 group.add_argument('--max_seq_len', type=int, default=2048, help='Maximum sequence length.')
 group.add_argument('--cfg-cache', action='store_true', help='ExLlamav2_HF: Create an additional cache for CFG negative prompts. Necessary to use CFG with that loader.')
 group.add_argument('--no_flash_attn', action='store_true', help='Force flash-attention to not be used.')
+group.add_argument('--no_xformers', action='store_true', help='Force xformers to not be used.')
+group.add_argument('--no_sdpa', action='store_true', help='Force Torch SDPA to not be used.')
 group.add_argument('--cache_8bit', action='store_true', help='Use 8-bit cache to save VRAM.')
 group.add_argument('--cache_4bit', action='store_true', help='Use Q4 cache to save VRAM.')
 group.add_argument('--num_experts_per_token', type=int, default=2, help='Number of experts to use for generation. Applies to MoE models like Mixtral.')
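Both new options are argparse store_true flags (presumably in modules/shared.py): they default to False and are enabled simply by being passed on the command line, exactly like --no_flash_attn. Assuming the usual python server.py entry point, a launch that rules out both backends would look like python server.py --no_xformers --no_sdpa.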
@@ -85,6 +85,8 @@ def list_model_elements():
         'disable_exllamav2',
         'cfg_cache',
         'no_flash_attn',
+        'no_xformers',
+        'no_sdpa',
         'num_experts_per_token',
         'cache_8bit',
         'cache_4bit',
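list_model_elements (in modules/ui.py) appears to enumerate the gradio component names whose values are collected from the model tab back into shared.args, so the names added here must match the shared.gradio keys created in the UI hunk below.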
@@ -138,7 +138,9 @@ def create_ui():
             shared.gradio['disk'] = gr.Checkbox(label="disk", value=shared.args.disk)
             shared.gradio['bf16'] = gr.Checkbox(label="bf16", value=shared.args.bf16)
             shared.gradio['autosplit'] = gr.Checkbox(label="autosplit", value=shared.args.autosplit, info='Automatically split the model tensors across the available GPUs.')
-            shared.gradio['no_flash_attn'] = gr.Checkbox(label="no_flash_attn", value=shared.args.no_flash_attn, info='Force flash-attention to not be used.')
+            shared.gradio['no_flash_attn'] = gr.Checkbox(label="no_flash_attn", value=shared.args.no_flash_attn)
+            shared.gradio['no_xformers'] = gr.Checkbox(label="no_xformers", value=shared.args.no_xformers)
+            shared.gradio['no_sdpa'] = gr.Checkbox(label="no_sdpa", value=shared.args.no_sdpa)
             shared.gradio['cfg_cache'] = gr.Checkbox(label="cfg-cache", value=shared.args.cfg_cache, info='Necessary to use CFG with this loader.')
             shared.gradio['cpp_runner'] = gr.Checkbox(label="cpp-runner", value=shared.args.cpp_runner, info='Enable inference with ModelRunnerCpp, which is faster than the default ModelRunner.')
             shared.gradio['num_experts_per_token'] = gr.Number(label="Number of experts per token", value=shared.args.num_experts_per_token, info='Only applies to MoE models like Mixtral.')
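Besides creating the two new checkboxes, this last hunk (apparently modules/ui_model_menu.py) also drops the info tooltip from the existing no_flash_attn checkbox, so the three attention-backend toggles render as plain, uniform checkboxes; the equivalent descriptions remain available in the command-line --help strings added above.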