From 0227e738ed4c2ecd3f1e3f6ff74137bee894f49f Mon Sep 17 00:00:00 2001
From: Jakub Strnad
Date: Tue, 16 May 2023 00:51:23 +0200
Subject: [PATCH] Add settings UI for llama.cpp and fixed reloading of llama.cpp models (#2087)

---
 modules/llamacpp_model.py |  3 +++
 modules/ui.py             |  2 +-
 server.py                 | 15 ++++++++++++++-
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py
index fa8c3045..65577ee0 100644
--- a/modules/llamacpp_model.py
+++ b/modules/llamacpp_model.py
@@ -16,6 +16,9 @@ class LlamaCppModel:
     def __init__(self):
         self.initialized = False
 
+    def __del__(self):
+        self.model.__del__()
+
     @classmethod
     def from_pretrained(self, path):
         result = self()
diff --git a/modules/ui.py b/modules/ui.py
index 7abea914..7d804fe0 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -27,7 +27,7 @@ theme = gr.themes.Default(
 
 
 def list_model_elements():
-    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'wbits', 'groupsize', 'model_type', 'pre_layer']
+    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'threads', 'n_batch', 'no-mmap', 'mlock', 'n_gpu_layers']
     for i in range(torch.cuda.device_count()):
         elements.append(f'gpu_memory_{i}')
     return elements
diff --git a/server.py b/server.py
index 576bfba7..608b4e0f 100644
--- a/server.py
+++ b/server.py
@@ -360,7 +360,20 @@ def create_model_menus():
                 shared.gradio['download_model_button'] = gr.Button("Download")
 
         with gr.Column():
-            shared.gradio['model_status'] = gr.Markdown('No model is loaded' if shared.model_name == 'None' else 'Ready')
+            with gr.Box():
+                gr.Markdown('llama.cpp parameters')
+                with gr.Row():
+                    with gr.Column():
+                        shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=32, value=shared.args.threads)
+                        shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, value=shared.args.n_batch)
+                        shared.gradio['n_gpu_layers'] = gr.Slider(label="n-gpu-layers", minimum=0, maximum=128, value=shared.args.n_gpu_layers)
+
+                    with gr.Column():
+                        shared.gradio['no-mmap'] = gr.Checkbox(label="no-mmap", value=shared.args.no_mmap)
+                        shared.gradio['mlock'] = gr.Checkbox(label="mlock", value=shared.args.mlock)
+
+            with gr.Row():
+                shared.gradio['model_status'] = gr.Markdown('No model is loaded' if shared.model_name == 'None' else 'Ready')
 
     # In this event handler, the interface state is read and updated
     # with the model defaults (if any), and then the model is loaded
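
For context on the reloading fix: the llama.cpp weights are held by the underlying llama_cpp.Llama object, and without the explicit __del__ above a reload could allocate the new model while the old one was still resident. Below is a minimal, illustrative sketch of the reload lifecycle this enables; the reload_model helper is hypothetical, but the keyword arguments mirror llama-cpp-python's Llama constructor and the UI controls added by the patch.

    from llama_cpp import Llama

    _current = None

    def reload_model(path, threads=0, n_batch=512, n_gpu_layers=0,
                     no_mmap=False, mlock=False):
        global _current
        if _current is not None:
            # Dropping the last reference invokes the wrapper's __del__
            # (added by this patch), freeing the old weights before the
            # new allocation instead of leaving both resident at once.
            _current = None
        _current = Llama(
            model_path=path,
            n_threads=threads or None,   # 0 on the "threads" slider means auto-detect
            n_batch=n_batch,
            n_gpu_layers=n_gpu_layers,
            use_mmap=not no_mmap,
            use_mlock=mlock,
        )
        return _current

In CPython, rebinding the last reference releases the object immediately via reference counting, so the old model's memory (including any mlock'ed pages) is returned before the new model is loaded.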