From def3b69002477c14d83a5f3eefc8f3cdeaa16ec1 Mon Sep 17 00:00:00 2001
From: ThisIsPIRI <34787507+ThisIsPIRI@users.noreply.github.com>
Date: Sun, 18 Jun 2023 21:14:06 +0000
Subject: [PATCH] Fix loading condition for universal llama tokenizer (#2753)

---
 modules/models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/models.py b/modules/models.py
index 4a4ea718..1aba66c5 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -86,7 +86,7 @@ def load_tokenizer(model_name, model):
         tokenizer = AutoTokenizer.from_pretrained(Path(f"{shared.args.model_dir}/gpt-j-6B/"))
     elif type(model) is transformers.LlamaForCausalLM or "LlamaGPTQForCausalLM" in str(type(model)):
         # Try to load an universal LLaMA tokenizer
-        if any(s in shared.model_name.lower() for s in ['llava', 'oasst']):
+        if not any(s in shared.model_name.lower() for s in ['llava', 'oasst']):
             for p in [Path(f"{shared.args.model_dir}/llama-tokenizer/"), Path(f"{shared.args.model_dir}/oobabooga_llama-tokenizer/")]:
                 if p.exists():
                     logger.info(f"Loading the universal LLaMA tokenizer from {p}...")
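
Note on the one-line change above: a minimal standalone sketch of the corrected guard. It is not part of the patch; the helper name and the model names below are hypothetical, and the real code tests shared.model_name rather than a plain argument. The apparent intent is that the universal LLaMA tokenizer be used only for models that do not ship a tokenizer of their own (the LLaVA and OASST families).

# Sketch only; assumes nothing beyond the condition shown in the diff.
def should_use_universal_tokenizer(model_name: str) -> bool:
    # LLaVA and OASST checkpoints come with their own tokenizers, so the
    # universal LLaMA tokenizer applies only when the name matches neither.
    return not any(s in model_name.lower() for s in ['llava', 'oasst'])

# Before this patch the guard lacked the `not`, so the universal tokenizer
# was loaded exactly for the models that should not use it.
assert should_use_universal_tokenizer('Llama-7B-hf')      # generic LLaMA: yes
assert not should_use_universal_tokenizer('llava-13b')    # hypothetical name: no
assert not should_use_universal_tokenizer('oasst-llama')  # hypothetical name: no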