diff --git a/modules/text_generation.py b/modules/text_generation.py
index 65375124..bd018c05 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -21,8 +21,11 @@ def get_max_prompt_length(tokens):
     return max_length
 
 def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
+
+    # These models do not have explicit tokenizers for now, so
+    # we return an estimate on the number of tokens
     if shared.is_RWKV or shared.is_LLaMA:
-        return prompt
+        return np.zeros((1, len(prompt)//5))
 
     input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens)
     if shared.args.cpu:
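
A minimal standalone sketch of the heuristic this change relies on, assuming downstream code only reads the second dimension of the returned array as a token count (the function name and the example prompt below are illustrative, not part of the patch): with no tokenizer available, the prompt length is approximated at roughly one token per 5 characters, and a zero-filled array of that shape stands in for real input_ids.

    import numpy as np

    def estimate_input_ids(prompt: str) -> np.ndarray:
        # Rough heuristic: ~5 characters per token for English-like text
        estimated_tokens = len(prompt) // 5
        # Shape (1, n) mimics a tokenizer's batched output; the values are unused
        return np.zeros((1, estimated_tokens))

    ids = estimate_input_ids("Hello there, how are you today?")
    print(ids.shape[1])  # estimated token count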