From a38a37b3b33c09f9ef7b0968df83bdc01e8d90ce Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sun, 19 May 2024 10:57:42 -0700
Subject: [PATCH] llama.cpp: default n_gpu_layers to the maximum value for the
 model automatically

---
 modules/models_settings.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/modules/models_settings.py b/modules/models_settings.py
index a7fed427..8576a16a 100644
--- a/modules/models_settings.py
+++ b/modules/models_settings.py
@@ -56,6 +56,7 @@ def get_model_metadata(model):
             model_file = list(path.glob('*.gguf'))[0]
 
         metadata = metadata_gguf.load_metadata(model_file)
+
         for k in metadata:
             if k.endswith('context_length'):
                 model_settings['n_ctx'] = metadata[k]
@@ -63,6 +64,9 @@ def get_model_metadata(model):
                 model_settings['rope_freq_base'] = metadata[k]
             elif k.endswith('rope.scale_linear'):
                 model_settings['compress_pos_emb'] = metadata[k]
+            elif k.endswith('block_count'):
+                model_settings['n_gpu_layers'] = metadata[k] + 1
+
         if 'tokenizer.chat_template' in metadata:
             template = metadata['tokenizer.chat_template']
             eos_token = metadata['tokenizer.ggml.tokens'][metadata['tokenizer.ggml.eos_token_id']]
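
For context, the new elif branch maps the GGUF block_count key (e.g. llama.block_count)
to a default n_gpu_layers of block_count + 1, presumably so the non-repeating output
layer can be offloaded along with every transformer block. Below is a minimal,
self-contained sketch of that rule, assuming a plain metadata dict shaped like the one
metadata_gguf.load_metadata() returns; the helper name and the sample values are
illustrative, not part of the patch.

# Illustrative sketch, not part of the patch: derive a default n_gpu_layers
# from a GGUF metadata dict the same way the new elif branch does.

def default_n_gpu_layers(metadata):
    # Look for any key ending in 'block_count' (e.g. 'llama.block_count').
    for key, value in metadata.items():
        if key.endswith('block_count'):
            # block_count transformer blocks, plus one extra layer
            # (matching the patch's "+ 1").
            return value + 1

    # No block count reported; leave n_gpu_layers at its existing default.
    return None


if __name__ == '__main__':
    sample = {'llama.block_count': 32, 'llama.context_length': 4096}  # made-up values
    print(default_n_gpu_layers(sample))  # -> 33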