diff --git a/modules/prompts.py b/modules/prompts.py
index 1d1a66b5..ce652def 100644
--- a/modules/prompts.py
+++ b/modules/prompts.py
@@ -48,4 +48,4 @@ def count_tokens(text):
         tokens = get_encoded_length(text)
         return str(tokens)
     except:
-        return '-1'
+        return '0'
diff --git a/modules/text_generation.py b/modules/text_generation.py
index d0f37007..8c2fe0b2 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -107,7 +107,6 @@ def _generate_reply(question, state, stopping_strings=None, is_chat=False, escap
 
 def encode(prompt, add_special_tokens=True, add_bos_token=True, truncation_length=None):
     if shared.tokenizer is None:
-        logger.error('No tokenizer is loaded')
         raise ValueError('No tokenizer is loaded')
 
     if shared.model.__class__.__name__ in ['LlamaCppModel', 'RWKVModel', 'CtransformersModel', 'Exllamav2Model']:
@@ -138,7 +137,6 @@ def encode(prompt, add_special_tokens=True, add_bos_token=True, truncation_lengt
 
 def decode(output_ids, skip_special_tokens=True):
     if shared.tokenizer is None:
-        logger.error('No tokenizer is loaded')
         raise ValueError('No tokenizer is loaded')
 
     return shared.tokenizer.decode(output_ids, skip_special_tokens)