From 5b354817f6b35c020bd5093d828f21feb4821c15 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 3 Mar 2023 15:04:41 -0300
Subject: [PATCH] Make chat minimally work with LLaMA

---
 modules/text_generation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/text_generation.py b/modules/text_generation.py
index e6ddfb1c..65375124 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -21,7 +21,7 @@ def get_max_prompt_length(tokens):
     return max_length
 
 def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
-    if shared.is_RWKV:
+    if shared.is_RWKV or shared.is_LLaMA:
         return prompt
     input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
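
Note: a minimal sketch of how encode() reads after applying this patch. It assumes shared.is_LLaMA is a flag set elsewhere at model-load time (it is not defined in this diff), and the trailing return is added only to make the sketch self-contained; the real function continues past the hunk shown above.

    def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
        # RWKV and LLaMA backends handle tokenization themselves, so hand back
        # the raw prompt string instead of a tensor of input ids.
        if shared.is_RWKV or shared.is_LLaMA:
            return prompt
        # Default path: tokenize with the loaded Hugging Face tokenizer,
        # truncating so the prompt plus the tokens to generate fit the context.
        input_ids = shared.tokenizer.encode(
            str(prompt),
            return_tensors='pt',
            truncation=True,
            max_length=get_max_prompt_length(tokens_to_generate),
            add_special_tokens=add_special_tokens,
        )
        return input_ids  # assumption: shown here only to close out the sketch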