Add settings UI for llama.cpp and fix reloading of llama.cpp models (#2087)

Jakub Strnad 2023-05-16 00:51:23 +02:00 committed by GitHub
parent 10869de0f4
commit 0227e738ed
3 changed files with 18 additions and 2 deletions

@@ -16,6 +16,9 @@ class LlamaCppModel:
     def __init__(self):
         self.initialized = False
 
+    def __del__(self):
+        self.model.__del__()
+
     @classmethod
     def from_pretrained(self, path):
         result = self()
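The new __del__ is what makes reloading work: it releases the native llama.cpp context as soon as the Python wrapper is dropped, so the next model can be loaded cleanly. A minimal sketch of the unload step this enables (the unload_model name and module-level model variable are illustrative, not part of this diff):

import gc

def unload_model():
    # Illustrative only: drop the last reference to the wrapper so
    # LlamaCppModel.__del__ runs and frees the llama.cpp context
    # before a new model is constructed.
    global model
    model = None
    gc.collect()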

@@ -27,7 +27,7 @@ theme = gr.themes.Default(
 
 
 def list_model_elements():
-    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'wbits', 'groupsize', 'model_type', 'pre_layer']
+    elements = ['cpu_memory', 'auto_devices', 'disk', 'cpu', 'bf16', 'load_in_8bit', 'wbits', 'groupsize', 'model_type', 'pre_layer', 'threads', 'n_batch', 'no-mmap', 'mlock', 'n_gpu_layers']
     for i in range(torch.cuda.device_count()):
         elements.append(f'gpu_memory_{i}')
     return elements
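Registering the new keys in list_model_elements() lets their values travel with the interface state and be applied back to the launch arguments before a reload. A rough sketch of that mapping (the helper below is hypothetical; only the dash-to-underscore convention, e.g. 'no-mmap' to shared.args.no_mmap, is taken from the diff):

def apply_llamacpp_settings(state, args):
    # Hypothetical helper: copy UI values back onto the argparse namespace
    # before reloading. Gradio keys use '-', attribute names use '_'.
    for key in ('threads', 'n_batch', 'no-mmap', 'mlock', 'n_gpu_layers'):
        setattr(args, key.replace('-', '_'), state[key])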

@@ -360,7 +360,20 @@ def create_model_menus():
                 shared.gradio['download_model_button'] = gr.Button("Download")
 
             with gr.Column():
-                shared.gradio['model_status'] = gr.Markdown('No model is loaded' if shared.model_name == 'None' else 'Ready')
+                with gr.Box():
+                    gr.Markdown('llama.cpp parameters')
+                    with gr.Row():
+                        with gr.Column():
+                            shared.gradio['threads'] = gr.Slider(label="threads", minimum=0, step=1, maximum=32, value=shared.args.threads)
+                            shared.gradio['n_batch'] = gr.Slider(label="n_batch", minimum=1, maximum=2048, value=shared.args.n_batch)
+                            shared.gradio['n_gpu_layers'] = gr.Slider(label="n-gpu-layers", minimum=0, maximum=128, value=shared.args.n_gpu_layers)
+
+                        with gr.Column():
+                            shared.gradio['no-mmap'] = gr.Checkbox(label="no-mmap", value=shared.args.no_mmap)
+                            shared.gradio['mlock'] = gr.Checkbox(label="mlock", value=shared.args.mlock)
+
+                with gr.Row():
+                    shared.gradio['model_status'] = gr.Markdown('No model is loaded' if shared.model_name == 'None' else 'Ready')
 
     # In this event handler, the interface state is read and updated
     # with the model defaults (if any), and then the model is loaded
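For reference, the same Box/Row/Column grouping can be reproduced outside the webui. This standalone sketch (gradio 3.x, where gr.Box still exists; placeholder default values stand in for shared.args) shows how the llama.cpp controls above are laid out:

import gradio as gr

with gr.Blocks() as demo:
    with gr.Box():
        gr.Markdown('llama.cpp parameters')
        with gr.Row():
            with gr.Column():
                gr.Slider(label="threads", minimum=0, maximum=32, step=1, value=0)
                gr.Slider(label="n_batch", minimum=1, maximum=2048, value=512)
                gr.Slider(label="n-gpu-layers", minimum=0, maximum=128, value=0)
            with gr.Column():
                gr.Checkbox(label="no-mmap", value=False)
                gr.Checkbox(label="mlock", value=False)

if __name__ == '__main__':
    demo.launch()

Because each component is created with value=shared.args.*, the panel opens showing whatever was passed on the command line, and the reload path then reads the edited values back from the interface state.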