Make chat minimally work with LLaMA

oobabooga 2023-03-03 15:04:41 -03:00
parent ea5c5eb3da
commit 5b354817f6


@@ -21,7 +21,7 @@ def get_max_prompt_length(tokens):
     return max_length
 
 def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
-    if shared.is_RWKV:
+    if shared.is_RWKV or shared.is_LLaMA:
         return prompt
     input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
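
The change routes LLaMA through the same raw-prompt path already used for RWKV: encode() now returns the prompt string untouched instead of running it through the Hugging Face tokenizer, leaving tokenization to the model's own pipeline. Below is a minimal standalone sketch of the patched function under stated assumptions: the _Shared stub and the hard-coded 2048-token context length stand in for the repo's shared module and real get_max_prompt_length(), which are not shown in this hunk.

# Standalone sketch of the patched encode(). The `shared` module and
# get_max_prompt_length() come from the surrounding repo; they are
# stubbed here (assumptions) so the example runs on its own.

class _Shared:
    is_RWKV = False
    is_LLaMA = True    # assumption: flag set elsewhere at model-load time
    tokenizer = None   # would be a Hugging Face tokenizer for other models

shared = _Shared()

def get_max_prompt_length(tokens_to_generate):
    # Stub: the real function derives this from the model's context
    # window; 2048 is an assumed placeholder.
    return 2048 - tokens_to_generate

def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
    # RWKV and LLaMA tokenize the prompt themselves downstream, so the
    # raw string is passed through instead of a tensor of token IDs.
    if shared.is_RWKV or shared.is_LLaMA:
        return prompt
    return shared.tokenizer.encode(
        str(prompt),
        return_tensors='pt',
        truncation=True,
        max_length=get_max_prompt_length(tokens_to_generate),
        add_special_tokens=add_special_tokens,
    )

print(encode("Hello, LLaMA!"))  # -> "Hello, LLaMA!" (raw prompt returned)

Note that callers of encode() must therefore accept either a string or a tensor of token IDs, which is why the guard sits at the very top of the function before any tokenizer call.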