From 0ede2965d5e53420a7dfe48137e353f72de084e4 Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Sun, 17 Sep 2023 18:46:08 -0700
Subject: [PATCH] Remove an error message

---
 modules/prompts.py         | 2 +-
 modules/text_generation.py | 2 --
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/modules/prompts.py b/modules/prompts.py
index 1d1a66b5..ce652def 100644
--- a/modules/prompts.py
+++ b/modules/prompts.py
@@ -48,4 +48,4 @@ def count_tokens(text):
         tokens = get_encoded_length(text)
         return str(tokens)
     except:
-        return '-1'
+        return '0'
diff --git a/modules/text_generation.py b/modules/text_generation.py
index d0f37007..8c2fe0b2 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -107,7 +107,6 @@ def _generate_reply(question, state, stopping_strings=None, is_chat=False, escap
 
 def encode(prompt, add_special_tokens=True, add_bos_token=True, truncation_length=None):
     if shared.tokenizer is None:
-        logger.error('No tokenizer is loaded')
         raise ValueError('No tokenizer is loaded')
 
     if shared.model.__class__.__name__ in ['LlamaCppModel', 'RWKVModel', 'CtransformersModel', 'Exllamav2Model']:
@@ -138,7 +137,6 @@ def encode(prompt, add_special_tokens=True, add_bos_token=True, truncation_lengt
 
 def decode(output_ids, skip_special_tokens=True):
     if shared.tokenizer is None:
-        logger.error('No tokenizer is loaded')
         raise ValueError('No tokenizer is loaded')
 
     return shared.tokenizer.decode(output_ids, skip_special_tokens)