From 2d196ed2feb222c8691c218c2ebc7b1ee773f804 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Wed, 12 Jun 2024 18:56:44 -0700
Subject: [PATCH] Remove obsolete pre_layer parameter

---
 modules/models_settings.py | 5 +----
 modules/ui.py              | 1 -
 modules/ui_model_menu.py   | 1 -
 3 files changed, 1 insertion(+), 6 deletions(-)

diff --git a/modules/models_settings.py b/modules/models_settings.py
index 2d161ad5..c3712db2 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -204,14 +204,11 @@ def update_model_parameters(state, initial=False):
             value = vars(shared.args_defaults)[element]
 
         # Making some simple conversions
-        if element in ['wbits', 'groupsize', 'pre_layer']:
+        if element in ['wbits', 'groupsize']:
             value = int(value)
         elif element == 'cpu_memory' and value is not None:
             value = f"{value}MiB"
 
-        if element in ['pre_layer']:
-            value = [value] if value > 0 else None
-
         setattr(shared.args, element, value)
 
     found_positive = False
diff --git a/modules/ui.py b/modules/ui.py
index 0126190a..a202d325 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -70,7 +70,6 @@ def list_model_elements():
         'use_double_quant',
         'wbits',
         'groupsize',
-        'pre_layer',
         'triton',
         'desc_act',
         'no_inject_fused_attention',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index 6cda7ecd..d240bb6a 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -101,7 +101,6 @@ def create_ui():
             shared.gradio['threads_batch'] = gr.Slider(label="threads_batch", minimum=0, step=1, maximum=256, value=shared.args.threads_batch)
             shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=shared.args.wbits if shared.args.wbits > 0 else "None")
             shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=shared.args.groupsize if shared.args.groupsize > 0 else "None")
-            shared.gradio['pre_layer'] = gr.Slider(label="pre_layer", minimum=0, maximum=100, value=shared.args.pre_layer[0] if shared.args.pre_layer is not None else 0)
             shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
             shared.gradio['max_seq_len'] = gr.Slider(label='max_seq_len', minimum=0, maximum=shared.settings['truncation_length_max'], step=256, info='Context length. Try lowering this if you run out of memory while loading the model.', value=shared.args.max_seq_len)
         with gr.Blocks():
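
For a quick sanity check of the simplified conversion path, here is a minimal, self-contained sketch of how the "Making some simple conversions" block in update_model_parameters() behaves after this patch. The apply_conversions() helper and the SimpleNamespace stand-in for shared.args are hypothetical, written only for this illustration; the conversion rules themselves come from the hunk above.

# Hypothetical harness for illustration only; shared.args in the repo is an
# argparse Namespace, approximated here with SimpleNamespace.
from types import SimpleNamespace

def apply_conversions(args, state):
    # Mirrors the post-patch conversion block: wbits/groupsize are coerced
    # to int, cpu_memory gets a MiB suffix, and the pre_layer
    # list-wrapping special case is gone.
    for element, value in state.items():
        if element in ['wbits', 'groupsize']:
            value = int(value)
        elif element == 'cpu_memory' and value is not None:
            value = f"{value}MiB"

        setattr(args, element, value)

args = SimpleNamespace(wbits=0, groupsize=0, cpu_memory=None)
apply_conversions(args, {'wbits': '4', 'groupsize': '128', 'cpu_memory': 8000})
print(args.wbits, args.groupsize, args.cpu_memory)  # -> 4 128 8000MiB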