diff --git a/modules/chat.py b/modules/chat.py
index 1b3e27d2..db1f069b 100644
--- a/modules/chat.py
+++ b/modules/chat.py
@@ -117,6 +117,11 @@ def extract_message_from_reply(reply, state):
 
 
 def chatbot_wrapper(text, state, regenerate=False, _continue=False):
+    if shared.model_name == 'None':
+        print("No model is loaded! Select one in the Model tab.")
+        yield shared.history['visible']
+        return
+
     # Defining some variables
     cumulative_reply = ''
     last_reply = [shared.history['internal'][-1][1], shared.history['visible'][-1][1]] if _continue else None
@@ -190,6 +195,11 @@ def chatbot_wrapper(text, state, regenerate=False, _continue=False):
 
 
 def impersonate_wrapper(text, state):
+    if shared.model_name == 'None':
+        print("No model is loaded! Select one in the Model tab.")
+        yield ''
+        return
+
     # Defining some variables
     cumulative_reply = ''
     eos_token = '\n' if state['stop_at_newline'] else None
diff --git a/modules/text_generation.py b/modules/text_generation.py
index 15b88264..259e959b 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -124,6 +124,12 @@ def stop_everything_event():
 
 
 def generate_reply(question, state, eos_token=None, stopping_strings=[]):
+
+    if shared.model_name == 'None':
+        print("No model is loaded! Select one in the Model tab.")
+        yield formatted_outputs(question, shared.model_name)
+        return
+
     clear_torch_cache()
     seed = set_manual_seed(state['seed'])
     shared.stop_everything = False
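
Note: all three patched functions are generators (their callers iterate over them to stream output), so a bare `return` would end the iteration without producing anything for the caller to display. Each guard therefore yields one type-appropriate placeholder before returning: the visible history in chatbot_wrapper, an empty string in impersonate_wrapper, and the formatted input question in generate_reply. Below is a minimal, self-contained sketch of the pattern; MODEL and stream_tokens are hypothetical stand-ins for the real shared state and generation backend, not names from this codebase.

    # Hypothetical stand-in for shared.model_name; note the webui compares
    # against the *string* 'None', while this sketch uses the None object.
    MODEL = None


    def stream_tokens(prompt):
        # Hypothetical stand-in for the real streaming backend.
        for word in ('Hi', ' there', '!'):
            yield word


    def generate_reply(prompt):
        # Guard: yield one placeholder and return, so a caller iterating
        # over this generator still receives a value to display instead
        # of an immediately exhausted iterator.
        if MODEL is None:
            print("No model is loaded! Select one in the Model tab.")
            yield prompt
            return

        # Normal path: accumulate and re-yield the growing reply.
        reply = ''
        for token in stream_tokens(prompt):
            reply += token
            yield reply


    for output in generate_reply('Hello'):
        print(output)

With MODEL unset, running the sketch prints the warning once and then echoes the prompt, which is the behavior the patch aims for when generation is attempted before a model has been selected.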