From 9e189947d1d51fdc88aa76490a58f8dce4685110 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Tue, 21 May 2024 10:37:30 -0700
Subject: [PATCH] Minor fix after bd7cc4234d0d2cc890c5e023f67741615c44484a
 (thanks @belladoreai)

---
 modules/models_settings.py | 3 +--
 modules/ui.py              | 1 -
 modules/ui_model_menu.py   | 5 +----
 3 files changed, 2 insertions(+), 7 deletions(-)

diff --git a/modules/models_settings.py b/modules/models_settings.py
index 2ecd8a58..2d161ad5 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -12,7 +12,6 @@ def get_fallback_settings():
         'wbits': 'None',
         'groupsize': 'None',
         'desc_act': False,
-        'model_type': 'None',
         'max_seq_len': 2048,
         'n_ctx': 2048,
         'rope_freq_base': 0,
@@ -199,7 +198,7 @@ def update_model_parameters(state, initial=False):
             continue

         # Setting null defaults
-        if element in ['wbits', 'groupsize', 'model_type'] and value == 'None':
+        if element in ['wbits', 'groupsize'] and value == 'None':
             value = vars(shared.args_defaults)[element]
         elif element in ['cpu_memory'] and value == 0:
             value = vars(shared.args_defaults)[element]
diff --git a/modules/ui.py b/modules/ui.py
index 992616de..0126190a 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -70,7 +70,6 @@ def list_model_elements():
         'use_double_quant',
         'wbits',
         'groupsize',
-        'model_type',
         'pre_layer',
         'triton',
         'desc_act',
diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py
index d7b4eabb..76ac7530 100644
--- a/modules/ui_model_menu.py
+++ b/modules/ui_model_menu.py
@@ -101,7 +101,6 @@ def create_ui():
                             shared.gradio['threads_batch'] = gr.Slider(label="threads_batch", minimum=0, step=1, maximum=256, value=shared.args.threads_batch)
                             shared.gradio['wbits'] = gr.Dropdown(label="wbits", choices=["None", 1, 2, 3, 4, 8], value=shared.args.wbits if shared.args.wbits > 0 else "None")
                             shared.gradio['groupsize'] = gr.Dropdown(label="groupsize", choices=["None", 32, 64, 128, 1024], value=shared.args.groupsize if shared.args.groupsize > 0 else "None")
-                            shared.gradio['model_type'] = gr.Dropdown(label="model_type", choices=["None"], value=shared.args.model_type or "None")
                             shared.gradio['pre_layer'] = gr.Slider(label="pre_layer", minimum=0, maximum=100, value=shared.args.pre_layer[0] if shared.args.pre_layer is not None else 0)
                             shared.gradio['gpu_split'] = gr.Textbox(label='gpu-split', info='Comma-separated list of VRAM (in GB) to use per GPU. Example: 20,7,7')
                             shared.gradio['max_seq_len'] = gr.Slider(label='max_seq_len', minimum=0, maximum=shared.settings['truncation_length_max'], step=256, info='Context length. Try lowering this if you run out of memory while loading the model.', value=shared.args.max_seq_len)
@@ -186,9 +185,7 @@ def create_ui():


 def create_event_handlers():
-    shared.gradio['loader'].change(
-        loaders.make_loader_params_visible, gradio('loader'), gradio(loaders.get_all_params())).then(
-        lambda value: gr.update(choices=loaders.get_model_types(value)), gradio('loader'), gradio('model_type'))
+    shared.gradio['loader'].change(loaders.make_loader_params_visible, gradio('loader'), gradio(loaders.get_all_params()))

     # In this event handler, the interface state is read and updated
     # with the model defaults (if any), and then the model is loaded