Add a --torch-compile flag for transformers

oobabooga 2025-01-05 05:45:12 -08:00
parent 11af199aff
commit c0f600c887
5 changed files with 9 additions and 2 deletions


@@ -9,12 +9,13 @@ loaders_and_params = OrderedDict({
     'Transformers': [
         'cpu_memory',
         'gpu_memory',
-        'load_in_4bit',
         'load_in_8bit',
+        'torch_compile',
         'bf16',
         'cpu',
         'disk',
         'auto_devices',
+        'load_in_4bit',
         'use_double_quant',
         'quant_type',
         'compute_dtype',


@@ -254,6 +254,9 @@ def huggingface_loader(model_name):
         print()
         model = LoaderClass.from_pretrained(path_to_model, **params)
 
+    if shared.args.torch_compile:
+        model = torch.compile(model)
+
     return model
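
For context, torch.compile wraps the loaded nn.Module and JIT-compiles its forward pass the first time it runs; the wrapper shares the original weights, so it can be returned in place of the uncompiled model. A minimal standalone sketch of the same pattern (the checkpoint name is only a placeholder, not part of this commit):

import torch
from transformers import AutoModelForCausalLM

# Load any causal LM checkpoint; "facebook/opt-125m" is just an illustrative choice.
model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")

# torch.compile returns a compiled wrapper around the module; the first forward
# call triggers graph capture and compilation, later calls reuse the compiled graph.
model = torch.compile(model)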


@@ -104,6 +104,7 @@ group.add_argument('--force-safetensors', action='store_true', help='Set use_saf
 group.add_argument('--no_use_fast', action='store_true', help='Set use_fast=False while loading the tokenizer (it\'s True by default). Use this if you have any problems related to use_fast.')
 group.add_argument('--use_flash_attention_2', action='store_true', help='Set use_flash_attention_2=True while loading the model.')
 group.add_argument('--use_eager_attention', action='store_true', help='Set attn_implementation= eager while loading the model.')
+group.add_argument('--torch-compile', action='store_true', help='Compile the model with torch.compile for improved performance.')
 
 # bitsandbytes 4-bit
 group = parser.add_argument_group('bitsandbytes 4-bit')
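
Because the flag is spelled with a dash, argparse exposes it as args.torch_compile (dashes become underscores in the attribute name), which is the name used by the loader code and the UI elements elsewhere in this commit. A small self-contained sketch of that behavior; the parser and group names here are illustrative, not taken from the webui:

import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('Transformers')  # group name is an assumption
group.add_argument('--torch-compile', action='store_true',
                   help='Compile the model with torch.compile for improved performance.')

args = parser.parse_args(['--torch-compile'])
print(args.torch_compile)  # True; '--torch-compile' maps to the attribute torch_compile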


@@ -109,12 +109,13 @@ def list_model_elements():
         'disk',
         'cpu',
         'bf16',
-        'load_in_4bit',
         'load_in_8bit',
+        'torch_compile',
         'trust_remote_code',
         'no_use_fast',
         'use_flash_attention_2',
         'use_eager_attention',
+        'load_in_4bit',
         'compute_dtype',
         'quant_type',
         'use_double_quant',


@@ -108,6 +108,7 @@ def create_ui():
 shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='NVIDIA only: use llama-cpp-python compiled with tensor cores support. This may increase performance on newer cards.')
 shared.gradio['load_in_8bit'] = gr.Checkbox(label="load-in-8bit", value=shared.args.load_in_8bit)
 shared.gradio['load_in_4bit'] = gr.Checkbox(label="load-in-4bit", value=shared.args.load_in_4bit)
+shared.gradio['torch_compile'] = gr.Checkbox(label="torch-compile", value=shared.args.torch_compile, info='Compile the model with torch.compile for improved performance.')
 shared.gradio['flash_attn'] = gr.Checkbox(label="flash_attn", value=shared.args.flash_attn, info='Use flash-attention.')
 shared.gradio['use_flash_attention_2'] = gr.Checkbox(label="use_flash_attention_2", value=shared.args.use_flash_attention_2, info='Set use_flash_attention_2=True while loading the model.')
 shared.gradio['streaming_llm'] = gr.Checkbox(label="streaming_llm", value=shared.args.streaming_llm, info='(experimental) Activate StreamingLLM to avoid re-evaluating the entire prompt when old messages are removed.')
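
The new checkbox is registered under the key 'torch_compile', the same name added to loaders_and_params and list_model_elements above, so the UI setting and the CLI flag refer to the same attribute. A hedged sketch of the checkbox pattern outside the webui; the Args class is a stand-in for shared.args and not part of this commit:

import gradio as gr

class Args:
    torch_compile = False  # stand-in for shared.args; default mirrors the CLI flag

args = Args()

with gr.Blocks() as demo:
    # A checkbox whose default value comes from the parsed argument, as in create_ui() above.
    torch_compile = gr.Checkbox(
        label="torch-compile",
        value=args.torch_compile,
        info='Compile the model with torch.compile for improved performance.',
    )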