From f06a1387f0e5250ca8994320f1aaace4d07c6c34 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Tue, 6 Jun 2023 07:58:07 -0300
Subject: [PATCH] Reorganize Models tab

---
 server.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/server.py b/server.py
index e9245e81..5e770d1b 100644
--- a/server.py
+++ b/server.py
@@ -377,13 +377,12 @@ def create_model_menus():
             with gr.Box():
                 with gr.Row():
                     with gr.Column():
-                        gr.Markdown('AutoGPTQ')
+                        gr.Markdown('GPTQ')
                         shared.gradio['triton'] = gr.Checkbox(label="triton", value=shared.args.triton)
                         shared.gradio['desc_act'] = gr.Checkbox(label="desc_act", value=shared.args.desc_act, info='\'desc_act\', \'wbits\', and \'groupsize\' are used for old models without a quantize_config.json.')
+                        shared.gradio['gptq_for_llama'] = gr.Checkbox(label="gptq-for-llama", value=shared.args.gptq_for_llama, info='Use GPTQ-for-LLaMa loader instead of AutoGPTQ. pre_layer should be used for CPU offloading instead of gpu-memory.')

                     with gr.Column():
-                        gr.Markdown('GPTQ-for-LLaMa')
-                        shared.gradio['gptq_for_llama'] = gr.Checkbox(label="gptq-for-llama", value=shared.args.gptq_for_llama, info='Use GPTQ-for-LLaMa to load the GPTQ model instead of AutoGPTQ. pre_layer should be used for CPU offloading instead of gpu-memory.')
                         with gr.Row():
                             shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=shared.args.wbits if shared.args.wbits > 0 else "None")
                             shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=shared.args.groupsize if shared.args.groupsize > 0 else "None")
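
For orientation, below is a minimal standalone sketch of the column layout the patched section produces. It is an illustration only, not part of the patch: the checkbox and dropdown defaults are hard-coded placeholders instead of the real shared.args values, nothing is wired into shared.gradio, and it assumes the gradio 3.x API (gr.Box) that server.py used at the time.

import gradio as gr

# Sketch of the reorganized GPTQ block: the gptq-for-llama checkbox now sits
# under the single 'GPTQ' heading in the left column, and the right column
# keeps only the wbits/groupsize row.
with gr.Blocks() as demo:
    with gr.Box():
        with gr.Row():
            with gr.Column():
                gr.Markdown('GPTQ')
                gr.Checkbox(label="triton", value=False)
                gr.Checkbox(label="desc_act", value=False,
                            info="'desc_act', 'wbits', and 'groupsize' are used for old models without a quantize_config.json.")
                gr.Checkbox(label="gptq-for-llama", value=False,
                            info='Use GPTQ-for-LLaMa loader instead of AutoGPTQ. pre_layer should be used for CPU offloading instead of gpu-memory.')

            with gr.Column():
                with gr.Row():
                    gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value="None")
                    gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value="None")

if __name__ == '__main__':
    demo.launch()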