mirror of
https://github.com/oobabooga/text-generation-webui.git
synced 2024-11-22 16:17:57 +01:00
Make chat minimally work with LLaMA
This commit is contained in:
parent
ea5c5eb3da
commit
5b354817f6
@@ -21,7 +21,7 @@ def get_max_prompt_length(tokens):
     return max_length


 def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
-    if shared.is_RWKV:
+    if shared.is_RWKV or shared.is_LLaMA:
         return prompt

     input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
Loading…
Reference in New Issue
Block a user