Mirror of https://github.com/oobabooga/text-generation-webui.git
Minor fix after bd7cc4234d (thanks @belladoreai)

Commit: 9e189947d1
Parent: ae86292159

This commit removes the leftover 'model_type' references from the model settings and UI code.
@@ -12,7 +12,6 @@ def get_fallback_settings():
         'wbits': 'None',
         'groupsize': 'None',
         'desc_act': False,
-        'model_type': 'None',
         'max_seq_len': 2048,
         'n_ctx': 2048,
         'rope_freq_base': 0,
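For orientation, this is what get_fallback_settings() returns after the commit, restricted to the entries visible in the hunk above; the merge at the end is a hypothetical usage sketch, not code from the repository:

def get_fallback_settings():
    # Defaults used when no model-specific settings are available.
    # Entries outside the hunk above are omitted from this sketch.
    return {
        'wbits': 'None',
        'groupsize': 'None',
        'desc_act': False,
        'max_seq_len': 2048,
        'n_ctx': 2048,
        'rope_freq_base': 0,
    }

# Hypothetical usage: per-model metadata, when present, overrides the
# fallbacks via a plain dict merge.
settings = {**get_fallback_settings(), 'n_ctx': 4096}
print(settings['n_ctx'])  # 4096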
@@ -199,7 +198,7 @@ def update_model_parameters(state, initial=False):
             continue

         # Setting null defaults
-        if element in ['wbits', 'groupsize', 'model_type'] and value == 'None':
+        if element in ['wbits', 'groupsize'] and value == 'None':
             value = vars(shared.args_defaults)[element]
         elif element in ['cpu_memory'] and value == 0:
             value = vars(shared.args_defaults)[element]
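The null-defaults block above is the reason the dropdowns can use the string 'None' as a placeholder: before a model loads, placeholder values are mapped back to the command-line defaults. A minimal standalone sketch of that normalization, with stand-in default values rather than the real shared.args_defaults:

from argparse import Namespace

# Stand-in for shared.args_defaults; assumed values, the real ones come
# from the project's argparse definitions in modules/shared.py.
args_defaults = Namespace(wbits=0, groupsize=-1, cpu_memory=None)

def normalize(element, value):
    # Mirrors the post-commit condition: 'model_type' is no longer handled.
    if element in ['wbits', 'groupsize'] and value == 'None':
        value = vars(args_defaults)[element]
    elif element in ['cpu_memory'] and value == 0:
        value = vars(args_defaults)[element]
    return value

print(normalize('wbits', 'None'))      # 0
print(normalize('groupsize', 'None'))  # -1
print(normalize('cpu_memory', 0))      # None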
@@ -70,7 +70,6 @@ def list_model_elements():
         'use_double_quant',
         'wbits',
         'groupsize',
-        'model_type',
         'pre_layer',
         'triton',
         'desc_act',
@@ -101,7 +101,6 @@ def create_ui():
             shared.gradio['threads_batch'] = gr.Slider(label="threads_batch", minimum=0, step=1, maximum=256, value=shared.args.threads_batch)
             shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=shared.args.wbits if shared.args.wbits > 0 else "None")
             shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=shared.args.groupsize if shared.args.groupsize > 0 else "None")
-            shared.gradio['model_type'] = gr.Dropdown(label="model_type", choices=["None"], value=shared.args.model_type or "None")
             shared.gradio['pre_layer'] = gr.Slider(label="pre_layer", minimum=0, maximum=100, value=shared.args.pre_layer[0] if shared.args.pre_layer is not None else 0)
             shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
             shared.gradio['max_seq_len'] = gr.Slider(label='max_seq_len', minimum=0, maximum=shared.settings['truncation_length_max'], step=256, info='Context length. Try lowering this if you run out of memory while loading the model.', value=shared.args.max_seq_len)
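Note the recurring pattern in the dropdowns above: the initial value falls back to the string 'None' whenever the underlying command-line argument is unset, which is exactly what the null-defaults branch in update_model_parameters() later reverses. A minimal sketch of the pattern in isolation, using a stand-in args namespace instead of shared.args:

import gradio as gr
from argparse import Namespace

# Stand-in for shared.args; wbits=0 means "not set on the command line".
args = Namespace(wbits=0, groupsize=128)

with gr.Blocks() as demo:
    # Same fallback pattern as the real UI: show 'None' while unset.
    wbits = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8],
                        value=args.wbits if args.wbits > 0 else "None")
    groupsize = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024],
                            value=args.groupsize if args.groupsize > 0 else "None")

# demo.launch()  # uncomment to serve the two dropdowns locally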
@@ -186,9 +185,7 @@ def create_ui():


 def create_event_handlers():
-    shared.gradio['loader'].change(
-        loaders.make_loader_params_visible, gradio('loader'), gradio(loaders.get_all_params())).then(
-        lambda value: gr.update(choices=loaders.get_model_types(value)), gradio('loader'), gradio('model_type'))
+    shared.gradio['loader'].change(loaders.make_loader_params_visible, gradio('loader'), gradio(loaders.get_all_params()))

     # In this event handler, the interface state is read and updated
     # with the model defaults (if any), and then the model is loaded
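With 'model_type' removed, the loader's change event no longer needs the chained .then() that refreshed the model_type choices; a single .change() toggling parameter visibility suffices. A sketch of the simplified wiring with toy stand-ins for loaders.make_loader_params_visible and the loader-to-parameter mapping (names and values here are illustrative, not from the repository):

import gradio as gr

# Toy mapping from loader to the parameters it exposes.
LOADER_PARAMS = {
    'llama.cpp': ['n_ctx', 'threads'],
    'ExLlama': ['max_seq_len', 'gpu_split'],
}
ALL_PARAMS = ['n_ctx', 'threads', 'max_seq_len', 'gpu_split']

def make_loader_params_visible(loader):
    # Toy version of loaders.make_loader_params_visible(): show only
    # the parameters that apply to the selected loader.
    visible = set(LOADER_PARAMS.get(loader, []))
    return [gr.update(visible=p in visible) for p in ALL_PARAMS]

with gr.Blocks() as demo:
    loader = gr.Dropdown(label="loader", choices=list(LOADER_PARAMS), value='llama.cpp')
    widgets = [
        gr.Slider(label="n_ctx", minimum=0, maximum=32768, value=2048),
        gr.Slider(label="threads", minimum=0, maximum=32, value=0),
        gr.Slider(label="max_seq_len", minimum=0, maximum=32768, value=2048, visible=False),
        gr.Textbox(label="gpu-split", visible=False),
    ]
    # One .change() call, mirroring the post-commit handler: no .then() chain.
    loader.change(make_loader_params_visible, loader, widgets)

# demo.launch()  # uncomment to try the visibility toggling locally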