Mirror of https://github.com/oobabooga/text-generation-webui.git
Synced 2024-11-22 08:07:56 +01:00
Remove obsolete pre_layer parameter

parent: 46174a2d33
commit: 2d196ed2fe
@@ -204,14 +204,11 @@ def update_model_parameters(state, initial=False):
             value = vars(shared.args_defaults)[element]
 
         # Making some simple conversions
-        if element in ['wbits', 'groupsize', 'pre_layer']:
+        if element in ['wbits', 'groupsize']:
             value = int(value)
         elif element == 'cpu_memory' and value is not None:
             value = f"{value}MiB"
 
-        if element in ['pre_layer']:
-            value = [value] if value > 0 else None
-
         setattr(shared.args, element, value)
 
     found_positive = False
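For context, the deleted branch converted pre_layer from the single number delivered by the UI into the list-or-None form the rest of the code expected. A minimal, self-contained sketch of that conversion (convert_pre_layer is a hypothetical name; the logic is copied from the removed lines):

    # Hypothetical standalone version of the removed pre_layer conversion.
    # A positive value was wrapped in a one-element list; 0 became None.
    def convert_pre_layer(value):
        value = int(value)
        return [value] if value > 0 else None

    assert convert_pre_layer(30) == [30]
    assert convert_pre_layer(0) is None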
@@ -70,7 +70,6 @@ def list_model_elements():
         'use_double_quant',
         'wbits',
         'groupsize',
-        'pre_layer',
         'triton',
         'desc_act',
         'no_inject_fused_attention',
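Dropping 'pre_layer' from list_model_elements() means the name is no longer synchronized between the Gradio widgets and shared.args. A self-contained toy illustration of that sync pattern (the loop shape is an assumption modeled on update_model_parameters above, not code from this commit):

    from types import SimpleNamespace

    def list_model_elements():
        return ['wbits', 'groupsize', 'triton']  # 'pre_layer' no longer listed

    args = SimpleNamespace(wbits=0, groupsize=-1, triton=False, pre_layer=None)
    state = {'wbits': 4, 'groupsize': 128, 'triton': True, 'pre_layer': [30]}

    # Only listed names are copied from the UI state onto the args namespace,
    # so the stale 'pre_layer' entry in state is simply ignored.
    for element in list_model_elements():
        setattr(args, element, state[element])

    print(args.pre_layer)  # None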
@@ -101,7 +101,6 @@ def create_ui():
             shared.gradio['threads_batch'] = gr.Slider(label="threads_batch", minimum=0, step=1, maximum=256, value=shared.args.threads_batch)
             shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=shared.args.wbits if shared.args.wbits > 0 else "None")
             shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=shared.args.groupsize if shared.args.groupsize > 0 else "None")
-            shared.gradio['pre_layer'] = gr.Slider(label="pre_layer", minimum=0, maximum=100, value=shared.args.pre_layer[0] if shared.args.pre_layer is not None else 0)
             shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
             shared.gradio['max_seq_len'] = gr.Slider(label='max_seq_len', minimum=0, maximum=shared.settings['truncation_length_max'], step=256, info='Context length. Try lowering this if you run out of memory while loading the model.', value=shared.args.max_seq_len)
             with gr.Blocks():
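The removed slider's value expression was the inverse of the conversion in the first hunk: it unwrapped the one-element list back into a plain number for display. A round-trip sketch (to_args and to_slider are hypothetical helper names; both bodies come from the diff):

    # pre_layer round trip: CLI/UI number -> [n] or None -> slider default.
    def to_args(value):                 # conversion removed in the first hunk
        value = int(value)
        return [value] if value > 0 else None

    def to_slider(pre_layer):           # value= expression removed in this hunk
        return pre_layer[0] if pre_layer is not None else 0

    for n in (0, 35):
        assert to_slider(to_args(n)) == n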