Mirror of https://github.com/oobabooga/text-generation-webui.git
Falcon support (trust-remote-code and autogptq checkboxes) (#2367)

Co-authored-by: oobabooga <112222186+oobabooga@users.noreply.github.com>

Parent: 60ae80cf28
Commit: 204731952a
@@ -226,7 +226,7 @@ Optionally, you can use the following command-line flags:
 | `--no-cache` | Set `use_cache` to False while generating text. This reduces the VRAM usage a bit with a performance cost. |
 | `--xformers` | Use xformer's memory efficient attention. This should increase your tokens/s. |
 | `--sdp-attention` | Use torch 2.0's sdp attention. |
-| `--trust-remote-code` | Set trust_remote_code=True while loading a model. Necessary for ChatGLM. |
+| `--trust-remote-code` | Set trust_remote_code=True while loading a model. Necessary for ChatGLM and Falcon. |

 #### Accelerate 4-bit

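For context (not part of the diff): the --trust-remote-code flag maps to the trust_remote_code=True argument that Transformers requires before it will execute the custom modeling code bundled with ChatGLM and, at the time of this commit, Falcon checkpoints. A minimal, illustrative load using the stock transformers API, with tiiuae/falcon-7b as an example repo id:

# Illustrative only, not part of this commit: loading a Falcon checkpoint with
# the argument that --trust-remote-code enables. The repo id is an example.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tiiuae/falcon-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)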
@@ -35,6 +35,7 @@ def load_quantized(model_name):
         'device': "cuda:0" if not shared.args.cpu else "cpu",
         'use_triton': shared.args.triton,
         'use_safetensors': use_safetensors,
+        'trust_remote_code': shared.args.trust_remote_code,
         'max_memory': get_max_memory_dict()
     }

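Not part of the diff, but for orientation: a params dict like the one above is typically unpacked into AutoGPTQ's from_quantized call, which is why the new key matters for Falcon GPTQ checkpoints. A rough sketch under that assumption; the model directory is made up and the keyword names simply mirror the dict keys shown in the hunk:

# Rough sketch, not the webui's loader: unpack params like those above into
# AutoGPTQ's from_quantized(). "models/falcon-7b-gptq" is a made-up path.
from auto_gptq import AutoGPTQForCausalLM

params = {
    'device': "cuda:0",
    'use_triton': False,
    'use_safetensors': True,
    'trust_remote_code': True,   # the key this commit adds
    'max_memory': None,
}
model = AutoGPTQForCausalLM.from_quantized("models/falcon-7b-gptq", **params)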
@@ -110,7 +110,7 @@ parser.add_argument('--bf16', action='store_true', help='Load the model with bfl
 parser.add_argument('--no-cache', action='store_true', help='Set use_cache to False while generating text. This reduces the VRAM usage a bit at a performance cost.')
 parser.add_argument('--xformers', action='store_true', help="Use xformer's memory efficient attention. This should increase your tokens/s.")
 parser.add_argument('--sdp-attention', action='store_true', help="Use torch 2.0's sdp attention.")
-parser.add_argument('--trust-remote-code', action='store_true', help="Set trust_remote_code=True while loading a model. Necessary for ChatGLM.")
+parser.add_argument('--trust-remote-code', action='store_true', help="Set trust_remote_code=True while loading a model. Necessary for ChatGLM and Falcon.")

 # Accelerate 4-bit
 parser.add_argument('--load-in-4bit', action='store_true', help='Load the model with 4-bit precision (using bitsandbytes).')
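An aside, not from the commit: these are plain argparse store_true switches, so the attribute names use underscores in place of dashes and default to False when the flag is absent. A small standalone sketch of that behaviour; the parser and the autogptq help text here are illustrative, not the webui's own:

# Minimal argparse sketch mirroring the flags above; not the webui's parser.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--trust-remote-code', action='store_true',
                    help='Set trust_remote_code=True while loading a model.')
parser.add_argument('--autogptq', action='store_true',
                    help='Illustrative flag: load quantized models with AutoGPTQ.')

args = parser.parse_args(['--trust-remote-code'])
print(args.trust_remote_code)  # True (dashes become underscores)
print(args.autogptq)           # False (store_true defaults to False)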
@@ -30,7 +30,7 @@ theme = gr.themes.Default(


 def list_model_elements():
-    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed']
+    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'trust_remote_code', 'load_in_4bit', 'compute_dtype', 'quant_type', 'use_double_quant', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'autogptq', 'threads', 'n_batch', 'no_mmap', 'mlock', 'n_gpu_layers', 'n_ctx', 'llama_cpp_seed']
     for i in range(torch.cuda.device_count()):
         elements.append(f'gpu_memory_{i}')

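For illustration only: the element list above is a flat list of option names, with one gpu_memory_<i> entry appended per visible CUDA device. A tiny standalone sketch of that pattern, trimmed to a few entries:

# Standalone sketch of the per-GPU naming pattern used above.
import torch

elements = ['cpu_memory', 'load_in_8bit', 'trust_remote_code', 'autogptq']
for i in range(torch.cuda.device_count()):  # 0 on a CPU-only machine
    elements.append(f'gpu_memory_{i}')

print(elements)  # e.g. [..., 'gpu_memory_0', 'gpu_memory_1'] with two GPUs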
@@ -1,5 +1,6 @@
 colorama
 datasets
+einops
 flexgen==0.1.7
 gradio_client==0.2.5
 gradio==3.31.0
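A note beyond the diff: einops is presumably added because Falcon's custom modeling code (the code that trust-remote-code allows to run) relies on it for tensor reshaping; the pin here is unversioned. A minimal example of the kind of rearrange call involved, with made-up dimensions:

# Illustrative einops usage, not tied to any specific model file: merge the
# per-head dimension back into the hidden dimension of an attention output.
import torch
from einops import rearrange

x = torch.randn(2, 8, 16, 64)                  # (batch, heads, seq, head_dim)
merged = rearrange(x, 'b h s d -> b s (h d)')  # (batch, seq, heads*head_dim)
print(merged.shape)                            # torch.Size([2, 16, 512])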
@@ -366,7 +366,8 @@ def create_model_menus():
 shared.gradio['cpu'] = gr.Checkbox(label="cpu", value=shared.args.cpu)
 shared.gradio['bf16'] = gr.Checkbox(label="bf16", value=shared.args.bf16)
 shared.gradio['load_in_8bit'] = gr.Checkbox(label="load-in-8bit", value=shared.args.load_in_8bit)
+shared.gradio['trust_remote_code'] = gr.Checkbox(label="trust-remote-code", value=shared.args.trust_remote_code, info='Make sure to inspect the .py files inside the model folder before loading it with this option enabled.')

 with gr.Box():
     gr.Markdown('Transformers 4-bit')
     with gr.Row():
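Outside the diff, for readers unfamiliar with Gradio: a checkbox like the one added above is just a Blocks component whose value is read when an event fires. A minimal sketch assuming Gradio 3.x (where gr.Box was still available); the wiring is simplified and not the webui's actual event graph:

# Minimal Gradio 3.x sketch, not the webui's layout: a trust-remote-code
# checkbox whose value is echoed into a textbox when toggled.
import gradio as gr

with gr.Blocks() as demo:
    with gr.Box():
        trust = gr.Checkbox(
            label="trust-remote-code",
            value=False,
            info="Inspect the model's .py files before enabling this.",
        )
        status = gr.Textbox(label="status")
    trust.change(lambda v: f"trust_remote_code = {v}", trust, status)

# demo.launch()  # uncomment to serve the UI locally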
@@ -391,9 +392,10 @@ def create_model_menus():
 with gr.Column():
     shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=shared.args.wbits if shared.args.wbits > 0 else "None")
     shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=shared.args.groupsize if shared.args.groupsize > 0 else "None")
+    shared.gradio['model_type'] = gr.Dropdown(label="model_type", choices=["None", "llama", "opt", "gptj"], value=shared.args.model_type or "None")

 with gr.Column():
-    shared.gradio['model_type'] = gr.Dropdown(label="model_type", choices=["None", "llama", "opt", "gptj"], value=shared.args.model_type or "None")
+    shared.gradio['autogptq'] = gr.Checkbox(label="autogptq", value=shared.args.autogptq, info='AutoGPTQ needs to be manually installed from source. When enabled, gpu-memory should be used for CPU offloading instead of pre_layer.')
     shared.gradio['pre_layer'] = gr.Slider(label="pre_layer", minimum=0, maximum=100, value=shared.args.pre_layer[0] if shared.args.pre_layer is not None else 0)

 with gr.Box():
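A closing note, not from the commit: the autogptq checkbox effectively chooses between two GPTQ backends, and the info text above reflects that CPU offloading is controlled differently in each (gpu-memory for AutoGPTQ, pre_layer for GPTQ-for-LLaMa). A hypothetical sketch of that decision; the helper function is made up and does not exist in the webui:

# Hypothetical helper, not present in the webui: summarize which backend and
# which offloading knob apply for a given setting of the autogptq option.
def gptq_backend(use_autogptq: bool) -> str:
    if use_autogptq:
        return "AutoGPTQ (offload layers with --gpu-memory)"
    return "GPTQ-for-LLaMa (offload layers with --pre_layer)"

print(gptq_backend(True))
print(gptq_backend(False))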