From 181743fd9767c049837a2b24cde5b155dcb5a51b Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 8 Dec 2023 05:16:23 -0800
Subject: [PATCH] Fix missing spaces tokenizer issue (closes #4834)

---
 modules/text_generation.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/modules/text_generation.py b/modules/text_generation.py
index 417ac194..f292bf1e 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -265,9 +265,8 @@ def apply_stopping_strings(reply, all_stop_strings):
 
 def get_reply_from_output_ids(output_ids, state, starting_from=0):
     reply = decode(output_ids[starting_from:], state['skip_special_tokens'])
-    if type(shared.tokenizer) in [transformers.LlamaTokenizer, transformers.LlamaTokenizerFast] and len(output_ids) > starting_from:
-        if shared.tokenizer.convert_ids_to_tokens(int(output_ids[starting_from])).startswith('▁'):
-            reply = ' ' + reply
+    if hasattr(shared.tokenizer, 'convert_ids_to_tokens') and len(output_ids) > starting_from and shared.tokenizer.convert_ids_to_tokens(int(output_ids[starting_from])).startswith('▁'):
+        reply = ' ' + reply
 
     return reply
 
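
Notes (not part of the patch): SentencePiece-based tokenizers mark word-initial
tokens with a '▁' prefix, and decoding a slice of output ids that starts
mid-stream silently drops the leading space that prefix represents. The old
check only fired for transformers.LlamaTokenizer and LlamaTokenizerFast; the
duck-typed hasattr check extends the fix to any tokenizer that exposes
convert_ids_to_tokens (e.g. other SentencePiece models such as Mistral). Below
is a minimal sketch of the behavior being fixed, assuming a local
SentencePiece-style checkpoint is available; the checkpoint path is a
placeholder, not part of the patch.

# Sketch only: demonstrates the missing-space behavior this patch addresses.
# "path/to/llama-checkpoint" is a placeholder for any SentencePiece-based
# model directory or hub id.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/llama-checkpoint")

ids = tokenizer.encode("Hello world", add_special_tokens=False)
print(tokenizer.convert_ids_to_tokens(ids))  # e.g. ['▁Hello', '▁world']

# Decoding a slice that starts mid-sequence loses the leading space that
# SentencePiece encodes as the '▁' prefix on the first token:
print(repr(tokenizer.decode(ids[1:])))       # 'world' (no leading space)

# The patched logic restores it for any tokenizer exposing
# convert_ids_to_tokens, not just LlamaTokenizer(Fast):
reply = tokenizer.decode(ids[1:])
if hasattr(tokenizer, 'convert_ids_to_tokens') and tokenizer.convert_ids_to_tokens(int(ids[1])).startswith('▁'):
    reply = ' ' + reply
print(repr(reply))                           # ' world'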