From e158299fb469dce8f11c45a4d6b710e239778bea Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Thu, 11 Apr 2024 14:50:05 -0700
Subject: [PATCH] Fix loading sharded GGUF models through llamacpp_HF

---
 modules/llamacpp_hf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/llamacpp_hf.py b/modules/llamacpp_hf.py
index e5a05f6e..1bfd667d 100644
--- a/modules/llamacpp_hf.py
+++ b/modules/llamacpp_hf.py
@@ -192,7 +192,7 @@ class LlamacppHF(PreTrainedModel):
         if path.is_file():
             model_file = path
         else:
-            model_file = list(path.glob('*.gguf'))[0]
+            model_file = sorted(path.glob('*.gguf'))[0]

         logger.info(f"llama.cpp weights detected: {model_file}\n")
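
Note (not part of the patch): the change matters because Path.glob() yields files in
arbitrary filesystem order, while a sharded GGUF model must be opened at its first
shard. Sorting the matches makes the selection deterministic and lexicographically
first. Below is a minimal standalone sketch of the same idea; the helper name
pick_first_shard and the shard filenames are illustrative assumptions, not code from
the repository.

    from pathlib import Path

    def pick_first_shard(path: Path) -> Path:
        # If a file was given directly, use it as-is (mirrors the patched logic).
        if path.is_file():
            return path

        # Sharded models are typically named like
        # 'model-00001-of-00003.gguf' (hypothetical example), so the
        # lexicographically smallest match is the first shard.
        gguf_files = sorted(path.glob('*.gguf'))
        if not gguf_files:
            raise FileNotFoundError(f"No .gguf files found in {path}")

        return gguf_files[0]

With list(path.glob(...))[0], the returned file could be any shard depending on the
filesystem; with sorted(...), it is reliably the '-00001-' shard.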