Mirror of https://github.com/oobabooga/text-generation-webui.git
Add a --torch-compile flag for transformers

commit c0f600c887
parent 11af199aff
modules/loaders.py
@@ -9,12 +9,13 @@ loaders_and_params = OrderedDict({
     'Transformers': [
         'cpu_memory',
         'gpu_memory',
         'load_in_8bit',
+        'torch_compile',
         'bf16',
         'cpu',
         'disk',
         'auto_devices',
         'load_in_4bit',
         'use_double_quant',
         'quant_type',
         'compute_dtype',
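
For context, loaders_and_params maps each loader to the settings exposed for it in the UI, so registering 'torch_compile' here is what surfaces the new option when the Transformers loader is selected. A minimal sketch of the pattern, with an illustrative mapping rather than the project's full one:

from collections import OrderedDict

# Illustrative miniature of loaders_and_params: each loader name maps to
# the parameter names whose widgets should be visible when it is selected.
loaders_and_params = OrderedDict({
    'Transformers': ['cpu_memory', 'gpu_memory', 'load_in_8bit', 'torch_compile'],
    'ExampleLoader': ['example_param'],  # hypothetical entry
})

def visible_params(loader):
    """Return the parameter names to display for the chosen loader."""
    return loaders_and_params.get(loader, [])

print(visible_params('Transformers'))  # [..., 'torch_compile']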
modules/models.py
@@ -254,6 +254,9 @@ def huggingface_loader(model_name):
     print()
     model = LoaderClass.from_pretrained(path_to_model, **params)

+    if shared.args.torch_compile:
+        model = torch.compile(model)
+
     return model
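
This hunk is the core of the feature: once the model is loaded, torch.compile (available since PyTorch 2.0) wraps it in an optimized module. A minimal standalone sketch of the same call, using a toy module instead of a Transformers model:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))

# torch.compile returns a wrapper module; compilation happens lazily on the
# first forward pass, and later calls reuse the compiled graph.
model = torch.compile(model)

x = torch.randn(8, 16)
print(model(x).shape)  # torch.Size([8, 4])

The first forward pass pays the graph-capture and compilation cost, so the speedup only shows up on repeated calls.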
modules/shared.py
@@ -104,6 +104,7 @@ group.add_argument('--force-safetensors', action='store_true', help='Set use_saf
 group.add_argument('--no_use_fast', action='store_true', help='Set use_fast=False while loading the tokenizer (it\'s True by default). Use this if you have any problems related to use_fast.')
 group.add_argument('--use_flash_attention_2', action='store_true', help='Set use_flash_attention_2=True while loading the model.')
 group.add_argument('--use_eager_attention', action='store_true', help='Set attn_implementation= eager while loading the model.')
+group.add_argument('--torch-compile', action='store_true', help='Compile the model with torch.compile for improved performance.')

 # bitsandbytes 4-bit
 group = parser.add_argument_group('bitsandbytes 4-bit')
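
Note that argparse converts hyphens in option names to underscores in attribute names, which is why the flag is spelled --torch-compile on the command line but read back as shared.args.torch_compile in the loader hunk above. A small self-contained check (the group name here is hypothetical):

import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('Model loader')  # hypothetical group name
group.add_argument('--torch-compile', action='store_true',
                   help='Compile the model with torch.compile for improved performance.')

# argparse stores '--torch-compile' under the attribute name 'torch_compile'.
args = parser.parse_args(['--torch-compile'])
print(args.torch_compile)  # True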
modules/ui.py
@@ -109,12 +109,13 @@ def list_model_elements():
         'disk',
         'cpu',
         'bf16',
         'load_in_8bit',
+        'torch_compile',
         'trust_remote_code',
         'no_use_fast',
         'use_flash_attention_2',
         'use_eager_attention',
         'load_in_4bit',
         'compute_dtype',
         'quant_type',
         'use_double_quant',
modules/ui_model_menu.py
@@ -108,6 +108,7 @@ def create_ui():
     shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='NVIDIA only: use llama-cpp-python compiled with tensor cores support. This may increase performance on newer cards.')
     shared.gradio['load_in_8bit'] = gr.Checkbox(label="load-in-8bit", value=shared.args.load_in_8bit)
     shared.gradio['load_in_4bit'] = gr.Checkbox(label="load-in-4bit", value=shared.args.load_in_4bit)
+    shared.gradio['torch_compile'] = gr.Checkbox(label="torch-compile", value=shared.args.torch_compile, info='Compile the model with torch.compile for improved performance.')
     shared.gradio['flash_attn'] = gr.Checkbox(label="flash_attn", value=shared.args.flash_attn, info='Use flash-attention.')
     shared.gradio['use_flash_attention_2'] = gr.Checkbox(label="use_flash_attention_2", value=shared.args.use_flash_attention_2, info='Set use_flash_attention_2=True while loading the model.')
     shared.gradio['streaming_llm'] = gr.Checkbox(label="streaming_llm", value=shared.args.streaming_llm, info='(experimental) Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')
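
The checkbox mirrors the command-line default into the web UI. A minimal standalone sketch of the same gr.Checkbox wiring outside the webui's settings form, with an illustrative default in place of shared.args.torch_compile:

import gradio as gr

default_torch_compile = False  # stand-in for shared.args.torch_compile

with gr.Blocks() as demo:
    gr.Checkbox(
        label="torch-compile",
        value=default_torch_compile,
        info='Compile the model with torch.compile for improved performance.',
    )

demo.launch()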