diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py index 1d068229..f611dc27 100644 --- a/extensions/silero_tts/script.py +++ b/extensions/silero_tts/script.py @@ -81,6 +81,7 @@ def input_modifier(string): if (shared.args.chat or shared.args.cai_chat) and len(shared.history['internal']) > 0: shared.history['visible'][-1] = [shared.history['visible'][-1][0], shared.history['visible'][-1][1].replace('controls autoplay>','controls>')] + shared.processing_message = "*Is recording a voice message...*" return string def output_modifier(string): @@ -119,6 +120,7 @@ def output_modifier(string): if params['show_text']: string += f'\n\n{original_string}' + shared.processing_message = "*Is typing...*" return string def bot_prefix_modifier(string): diff --git a/modules/chat.py b/modules/chat.py index d78278c4..bd45b879 100644 --- a/modules/chat.py +++ b/modules/chat.py @@ -126,8 +126,9 @@ def chatbot_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typical else: prompt = custom_generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size) + # Yield *Is typing...* if not regenerate: - yield shared.history['visible']+[[visible_text, '*Is typing...*']] + yield shared.history['visible']+[[visible_text, shared.processing_message]] # Generate reply = '' @@ -168,7 +169,8 @@ def impersonate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typ prompt = generate_chat_prompt(text, max_new_tokens, name1, name2, context, chat_prompt_size, impersonate=True) reply = '' - yield '*Is typing...*' + # Yield *Is typing...* + yield shared.processing_message for i in range(chat_generation_attempts): for reply in generate_reply(prompt+reply, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=eos_token, stopping_string=f"\n{name2}:"): reply, next_character_found = 
extract_message_from_reply(prompt, reply, name1, name2, check, impersonate=True) @@ -187,8 +189,8 @@ def regenerate_wrapper(text, max_new_tokens, do_sample, temperature, top_p, typi else: last_visible = shared.history['visible'].pop() last_internal = shared.history['internal'].pop() - - yield generate_chat_output(shared.history['visible']+[[last_visible[0], '*Is typing...*']], name1, name2, shared.character) + # Yield *Is typing...* + yield generate_chat_output(shared.history['visible']+[[last_visible[0], shared.processing_message]], name1, name2, shared.character) for _history in chatbot_wrapper(last_internal[0], max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size, chat_generation_attempts, regenerate=True): if shared.args.cai_chat: shared.history['visible'][-1] = [last_visible[0], _history[-1][1]] diff --git a/modules/shared.py b/modules/shared.py index 8fcd4745..5411009a 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -11,6 +11,7 @@ is_RWKV = False history = {'internal': [], 'visible': []} character = 'None' stop_everything = False +processing_message = '*Is typing...*' # UI elements (buttons, sliders, HTML, etc) gradio = {}