diff --git a/modules/chat.py b/modules/chat.py
index 4e094329..a297d84c 100644
--- a/modules/chat.py
+++ b/modules/chat.py
@@ -253,7 +253,7 @@ def tokenize_dialogue(dialogue, name1, name2):
             _history.append(entry)
             entry = ['', '']

-    print(f"\033[1;32;1m\nDialogue tokenized to:\033[0;37;0m\n", end='')
+    print("\033[1;32;1m\nDialogue tokenized to:\033[0;37;0m\n", end='')
     for row in _history:
         for column in row:
             print("\n")
@@ -301,8 +301,8 @@ def load_history(file, name1, name2):
     shared.history['visible'] = copy.deepcopy(shared.history['internal'])

 def load_default_history(name1, name2):
-    if Path(f'logs/persistent.json').exists():
-        load_history(open(Path(f'logs/persistent.json'), 'rb').read(), name1, name2)
+    if Path('logs/persistent.json').exists():
+        load_history(open(Path('logs/persistent.json'), 'rb').read(), name1, name2)
     else:
         shared.history['internal'] = []
         shared.history['visible'] = []
@@ -370,5 +370,5 @@ def upload_tavern_character(img, name1, name2):

 def upload_your_profile_picture(img):
     img = Image.open(io.BytesIO(img))
-    img.save(Path(f'img_me.png'))
-    print(f'Profile picture saved to "img_me.png"')
+    img.save(Path('img_me.png'))
+    print('Profile picture saved to "img_me.png"')
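Note on the modules/chat.py hunks above: every removed `f` prefix sat on a string literal with no `{}` placeholders, so the f-string machinery did nothing useful; linters flag this pattern (flake8/pyflakes reports it as F541, assuming such a linter is in play here). A minimal illustration:

```python
print(f'Ok.')  # f-string with no placeholders: needless, flagged as F541
print('Ok.')   # equivalent plain string, what this diff switches to

name = 'gpt4chan'
print(f'Loading "{name}"...')  # a real placeholder, so the f prefix stays
```

The same cleanup repeats in the files below.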
diff --git a/modules/extensions.py b/modules/extensions.py
index 17d9a381..be829330 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -13,7 +13,7 @@ def load_extensions():
             print(f'Loading the extension "{name}"... ', end='')
             exec(f"import extensions.{name}.script")
             state[name] = [True, i]
-            print(f'Ok.')
+            print('Ok.')

 # This iterator returns the extensions in the order specified in the command-line
 def iterator():
diff --git a/modules/models.py b/modules/models.py
index 37f7dfd8..0cb9ae6e 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -117,7 +117,7 @@ def load_model(model_name):
         model = eval(command)

     # Loading the tokenizer
-    if shared.model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) and Path(f"models/gpt-j-6B/").exists():
+    if shared.model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) and Path("models/gpt-j-6B/").exists():
         tokenizer = AutoTokenizer.from_pretrained(Path("models/gpt-j-6B/"))
     else:
         tokenizer = AutoTokenizer.from_pretrained(Path(f"models/{shared.model_name}/"))
diff --git a/modules/shared.py b/modules/shared.py
index e94b5b65..d4ffc19d 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -11,6 +11,9 @@ history = {'internal': [], 'visible': []}
 character = 'None'
 stop_everything = False

+# UI elements (buttons, sliders, HTML, etc)
+gradio = {}
+
 settings = {
     'max_new_tokens': 200,
     'max_new_tokens_min': 1,
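The three-line modules/shared.py hunk is the heart of this refactor: `shared.gradio` is a single module-level registry that every other module writes UI components into and reads them back from, replacing ad-hoc locals and the old `buttons` dict that server.py deletes below. A minimal sketch of the pattern (plain objects stand in for Gradio components; the helper names are illustrative, not the project's):

```python
# shared.py equivalent: one flat, importable registry keyed by component name
gradio = {}

# Builder code registers what it creates...
def build_ui():
    gradio['textbox'] = object()   # stand-in for gr.Textbox(label='Input')
    gradio['Generate'] = object()  # stand-in for gr.Button('Generate')

# ...and any other module can look components up later without plumbing
def wire_events():
    return gradio['textbox'], gradio['Generate']

build_ui()
wire_events()
```

The trade-off is the usual one for registries: wiring no longer depends on argument order, but a typo in a string key fails only at runtime.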
diff --git a/modules/text_generation.py b/modules/text_generation.py
index 02d1210d..81e8da9f 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -126,9 +126,9 @@ def generate_reply(question, tokens, do_sample, max_new_tokens, temperature, top
     if shared.args.deepspeed:
         generate_params.append("synced_gpus=True")
     if shared.args.no_stream:
-        generate_params.append(f"max_new_tokens=tokens")
+        generate_params.append("max_new_tokens=tokens")
     else:
-        generate_params.append(f"max_new_tokens=8")
+        generate_params.append("max_new_tokens=8")

     if shared.soft_prompt:
         inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)
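A non-obvious detail in the text_generation.py hunk: `generate_params` collects *strings* such as `"max_new_tokens=tokens"`, not keyword arguments. Consistent with `model = eval(command)` in models.py and `eval(function_call)` in server.py below, the call site presumably joins these fragments into a `generate(...)` expression and `eval()`s it. That call site is not shown in this diff, so the following is only a hedged sketch of the idiom:

```python
tokens = 200
no_stream = True

generate_params = []
if no_stream:
    generate_params.append("max_new_tokens=tokens")  # whole budget in one pass
else:
    generate_params.append("max_new_tokens=8")       # short bursts while streaming

# Assumed call-site shape: build the call as text, then evaluate it;
# "tokens" is resolved from the enclosing scope at eval time.
def fake_generate(max_new_tokens):
    return max_new_tokens

call = f"fake_generate({', '.join(generate_params)})"
print(eval(call))  # 200
```

This explains why the removed `f` prefixes were pure noise here too: the placeholders are resolved by `eval()` later, not by the f-string.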
diff --git a/server.py b/server.py
index 004daa2c..c63127df 100644
--- a/server.py
+++ b/server.py
@@ -100,50 +100,49 @@ def create_settings_menus():
     with gr.Row():
         with gr.Column():
             with gr.Row():
-                model_menu = gr.Dropdown(choices=available_models, value=shared.model_name, label='Model')
-                ui.create_refresh_button(model_menu, lambda : None, lambda : {"choices": get_available_models()}, "refresh-button")
+                shared.gradio['model_menu'] = gr.Dropdown(choices=available_models, value=shared.model_name, label='Model')
+                ui.create_refresh_button(shared.gradio['model_menu'], lambda : None, lambda : {"choices": get_available_models()}, "refresh-button")
         with gr.Column():
             with gr.Row():
-                preset_menu = gr.Dropdown(choices=available_presets, value=shared.settings[f'preset{suffix}'] if not shared.args.flexgen else 'Naive', label='Generation parameters preset')
-                ui.create_refresh_button(preset_menu, lambda : None, lambda : {"choices": get_available_presets()}, "refresh-button")
+                shared.gradio['preset_menu'] = gr.Dropdown(choices=available_presets, value=shared.settings[f'preset{suffix}'] if not shared.args.flexgen else 'Naive', label='Generation parameters preset')
+                ui.create_refresh_button(shared.gradio['preset_menu'], lambda : None, lambda : {"choices": get_available_presets()}, "refresh-button")

     with gr.Accordion("Custom generation parameters", open=False, elem_id="accordion"):
         with gr.Row():
-            do_sample = gr.Checkbox(value=generate_params['do_sample'], label="do_sample")
-            temperature = gr.Slider(0.01, 1.99, value=generate_params['temperature'], step=0.01, label="temperature")
+            shared.gradio['do_sample'] = gr.Checkbox(value=generate_params['do_sample'], label="do_sample")
+            shared.gradio['temperature'] = gr.Slider(0.01, 1.99, value=generate_params['temperature'], step=0.01, label="temperature")
         with gr.Row():
-            top_k = gr.Slider(0,200,value=generate_params['top_k'],step=1,label="top_k")
-            top_p = gr.Slider(0.0,1.0,value=generate_params['top_p'],step=0.01,label="top_p")
+            shared.gradio['top_k'] = gr.Slider(0,200,value=generate_params['top_k'],step=1,label="top_k")
+            shared.gradio['top_p'] = gr.Slider(0.0,1.0,value=generate_params['top_p'],step=0.01,label="top_p")
         with gr.Row():
-            repetition_penalty = gr.Slider(1.0,4.99,value=generate_params['repetition_penalty'],step=0.01,label="repetition_penalty")
-            no_repeat_ngram_size = gr.Slider(0, 20, step=1, value=generate_params["no_repeat_ngram_size"], label="no_repeat_ngram_size")
+            shared.gradio['repetition_penalty'] = gr.Slider(1.0,4.99,value=generate_params['repetition_penalty'],step=0.01,label="repetition_penalty")
+            shared.gradio['no_repeat_ngram_size'] = gr.Slider(0, 20, step=1, value=generate_params["no_repeat_ngram_size"], label="no_repeat_ngram_size")
         with gr.Row():
-            typical_p = gr.Slider(0.0,1.0,value=generate_params['typical_p'],step=0.01,label="typical_p")
-            min_length = gr.Slider(0, 2000, step=1, value=generate_params["min_length"] if shared.args.no_stream else 0, label="min_length", interactive=shared.args.no_stream)
+            shared.gradio['typical_p'] = gr.Slider(0.0,1.0,value=generate_params['typical_p'],step=0.01,label="typical_p")
+            shared.gradio['min_length'] = gr.Slider(0, 2000, step=1, value=generate_params["min_length"] if shared.args.no_stream else 0, label="min_length", interactive=shared.args.no_stream)

         gr.Markdown("Contrastive search:")
-        penalty_alpha = gr.Slider(0, 5, value=generate_params["penalty_alpha"], label="penalty_alpha")
+        shared.gradio['penalty_alpha'] = gr.Slider(0, 5, value=generate_params["penalty_alpha"], label="penalty_alpha")

         gr.Markdown("Beam search (uses a lot of VRAM):")
         with gr.Row():
-            num_beams = gr.Slider(1, 20, step=1, value=generate_params["num_beams"], label="num_beams")
-            length_penalty = gr.Slider(-5, 5, value=generate_params["length_penalty"], label="length_penalty")
-        early_stopping = gr.Checkbox(value=generate_params["early_stopping"], label="early_stopping")
+            shared.gradio['num_beams'] = gr.Slider(1, 20, step=1, value=generate_params["num_beams"], label="num_beams")
+            shared.gradio['length_penalty'] = gr.Slider(-5, 5, value=generate_params["length_penalty"], label="length_penalty")
+        shared.gradio['early_stopping'] = gr.Checkbox(value=generate_params["early_stopping"], label="early_stopping")

     with gr.Accordion("Soft prompt", open=False, elem_id="accordion"):
         with gr.Row():
-            softprompts_menu = gr.Dropdown(choices=available_softprompts, value="None", label='Soft prompt')
-            ui.create_refresh_button(softprompts_menu, lambda : None, lambda : {"choices": get_available_softprompts()}, "refresh-button")
+            shared.gradio['softprompts_menu'] = gr.Dropdown(choices=available_softprompts, value="None", label='Soft prompt')
+            ui.create_refresh_button(shared.gradio['softprompts_menu'], lambda : None, lambda : {"choices": get_available_softprompts()}, "refresh-button")

         gr.Markdown('Upload a soft prompt (.zip format):')
         with gr.Row():
-            upload_softprompt = gr.File(type='binary', file_types=[".zip"])
+            shared.gradio['upload_softprompt'] = gr.File(type='binary', file_types=[".zip"])

-    model_menu.change(load_model_wrapper, [model_menu], [model_menu], show_progress=True)
-    preset_menu.change(load_preset_values, [preset_menu], [do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping])
-    softprompts_menu.change(load_soft_prompt, [softprompts_menu], [softprompts_menu], show_progress=True)
-    upload_softprompt.upload(upload_soft_prompt, [upload_softprompt], [softprompts_menu])
-    return preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping
+    shared.gradio['model_menu'].change(load_model_wrapper, [shared.gradio['model_menu']], [shared.gradio['model_menu']], show_progress=True)
+    shared.gradio['preset_menu'].change(load_preset_values, [shared.gradio['preset_menu']], [shared.gradio['do_sample'], shared.gradio['temperature'], shared.gradio['top_p'], shared.gradio['typical_p'], shared.gradio['repetition_penalty'], shared.gradio['top_k'], shared.gradio['min_length'], shared.gradio['no_repeat_ngram_size'], shared.gradio['num_beams'], shared.gradio['penalty_alpha'], shared.gradio['length_penalty'], shared.gradio['early_stopping']])
+    shared.gradio['softprompts_menu'].change(load_soft_prompt, [shared.gradio['softprompts_menu']], [shared.gradio['softprompts_menu']], show_progress=True)
+    shared.gradio['upload_softprompt'].upload(upload_soft_prompt, [shared.gradio['upload_softprompt']], [shared.gradio['softprompts_menu']])

 available_models = get_available_models()
 available_presets = get_available_presets()
@@ -174,10 +173,9 @@ else:
     shared.model, shared.tokenizer = load_model(shared.model_name)

 # UI settings
-buttons = {}
 gen_events = []
 suffix = '_pygmalion' if 'pygmalion' in shared.model_name.lower() else ''
-description = f"\n\n# Text generation lab\nGenerate text using Large Language Models.\n"
+description = "\n\n# Text generation lab\nGenerate text using Large Language Models.\n"
 if shared.model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')):
     default_text = shared.settings['prompt_gpt4chan']
 elif re.match('(rosey|chip|joi)_.*_instruct.*', shared.model_name.lower()) is not None:
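The deleted `return` at the end of `create_settings_menus()` shows what the registry buys: previously the function handed back a 13-element tuple that every caller had to unpack positionally, in exactly the right order, and the hunks below show those unpacking sites disappearing. A reduced before/after sketch (the stub builders stand in for the `gr.*` constructors):

```python
# Stand-ins for gr.Checkbox / gr.Slider, purely illustrative
make_checkbox = lambda: {'kind': 'checkbox'}
make_slider = lambda: {'kind': 'slider'}

# Before: every caller unpacks a long tuple, positionally
def create_settings_menus_old():
    do_sample, temperature = make_checkbox(), make_slider()
    return do_sample, temperature  # 13 values in the real function

do_sample, temperature = create_settings_menus_old()

# After: register once under stable names, return nothing
gradio = {}
def create_settings_menus_new():
    gradio['do_sample'] = make_checkbox()
    gradio['temperature'] = make_slider()

create_settings_menus_new()
temperature = gradio['temperature']
```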
@@ -186,176 +184,178 @@ else:
     default_text = shared.settings['prompt']

 if shared.args.chat or shared.args.cai_chat:
-    with gr.Blocks(css=ui.css+ui.chat_css, analytics_enabled=False) as interface:
-        interface.load(lambda : chat.load_default_history(shared.settings[f'name1{suffix}'], shared.settings[f'name2{suffix}']), None, None)
+    with gr.Blocks(css=ui.css+ui.chat_css, analytics_enabled=False) as shared.gradio['interface']:
+        shared.gradio['interface'].load(lambda : chat.load_default_history(shared.settings[f'name1{suffix}'], shared.settings[f'name2{suffix}']), None, None)
         if shared.args.cai_chat:
-            display = gr.HTML(value=generate_chat_html(shared.history['visible'], shared.settings[f'name1{suffix}'], shared.settings[f'name2{suffix}'], shared.character))
+            shared.gradio['display'] = gr.HTML(value=generate_chat_html(shared.history['visible'], shared.settings[f'name1{suffix}'], shared.settings[f'name2{suffix}'], shared.character))
         else:
-            display = gr.Chatbot(value=shared.history['visible'])
-        textbox = gr.Textbox(label='Input')
+            shared.gradio['display'] = gr.Chatbot(value=shared.history['visible'])
+        shared.gradio['textbox'] = gr.Textbox(label='Input')
         with gr.Row():
-            buttons["Stop"] = gr.Button("Stop")
-            buttons["Generate"] = gr.Button("Generate")
-            buttons["Regenerate"] = gr.Button("Regenerate")
+            shared.gradio["Stop"] = gr.Button("Stop")
+            shared.gradio["Generate"] = gr.Button("Generate")
+            shared.gradio["Regenerate"] = gr.Button("Regenerate")
         with gr.Row():
-            buttons["Impersonate"] = gr.Button("Impersonate")
-            buttons["Remove last"] = gr.Button("Remove last")
-            buttons["Clear history"] = gr.Button("Clear history")
+            shared.gradio["Impersonate"] = gr.Button("Impersonate")
+            shared.gradio["Remove last"] = gr.Button("Remove last")
+            shared.gradio["Clear history"] = gr.Button("Clear history")
         with gr.Row():
-            buttons["Send last reply to input"] = gr.Button("Send last reply to input")
-            buttons["Replace last reply"] = gr.Button("Replace last reply")
+            shared.gradio["Send last reply to input"] = gr.Button("Send last reply to input")
+            shared.gradio["Replace last reply"] = gr.Button("Replace last reply")
         if shared.args.picture:
             with gr.Row():
-                picture_select = gr.Image(label="Send a picture", type='pil')
+                shared.gradio['picture_select'] = gr.Image(label="Send a picture", type='pil')

         with gr.Tab("Chat settings"):
-            name1 = gr.Textbox(value=shared.settings[f'name1{suffix}'], lines=1, label='Your name')
-            name2 = gr.Textbox(value=shared.settings[f'name2{suffix}'], lines=1, label='Bot\'s name')
-            context = gr.Textbox(value=shared.settings[f'context{suffix}'], lines=2, label='Context')
+            shared.gradio['name1'] = gr.Textbox(value=shared.settings[f'name1{suffix}'], lines=1, label='Your name')
+            shared.gradio['name2'] = gr.Textbox(value=shared.settings[f'name2{suffix}'], lines=1, label='Bot\'s name')
+            shared.gradio['context'] = gr.Textbox(value=shared.settings[f'context{suffix}'], lines=2, label='Context')
             with gr.Row():
-                character_menu = gr.Dropdown(choices=available_characters, value="None", label='Character')
-                ui.create_refresh_button(character_menu, lambda : None, lambda : {"choices": get_available_characters()}, "refresh-button")
+                shared.gradio['character_menu'] = gr.Dropdown(choices=available_characters, value="None", label='Character')
+                ui.create_refresh_button(shared.gradio['character_menu'], lambda : None, lambda : {"choices": get_available_characters()}, "refresh-button")
             with gr.Row():
-                check = gr.Checkbox(value=shared.settings[f'stop_at_newline{suffix}'], label='Stop generating at new line character?')
+                shared.gradio['check'] = gr.Checkbox(value=shared.settings[f'stop_at_newline{suffix}'], label='Stop generating at new line character?')
             with gr.Row():
                 with gr.Tab('Chat history'):
                     with gr.Row():
                         with gr.Column():
                             gr.Markdown('Upload')
-                            upload_chat_history = gr.File(type='binary', file_types=[".json", ".txt"])
+                            shared.gradio['upload_chat_history'] = gr.File(type='binary', file_types=[".json", ".txt"])
                         with gr.Column():
                             gr.Markdown('Download')
-                            download = gr.File()
-                            buttons["Download"] = gr.Button(value="Click me")
+                            shared.gradio['download'] = gr.File()
+                            shared.gradio["Download"] = gr.Button(value="Click me")
                 with gr.Tab('Upload character'):
                     with gr.Row():
                         with gr.Column():
                             gr.Markdown('1. Select the JSON file')
-                            upload_char = gr.File(type='binary', file_types=[".json"])
+                            shared.gradio['upload_char'] = gr.File(type='binary', file_types=[".json"])
                         with gr.Column():
                             gr.Markdown('2. Select your character\'s profile picture (optional)')
-                            upload_img = gr.File(type='binary', file_types=["image"])
-                    buttons["Upload character"] = gr.Button(value="Submit")
+                            shared.gradio['upload_img'] = gr.File(type='binary', file_types=["image"])
+                    shared.gradio["Upload character"] = gr.Button(value="Submit")
                 with gr.Tab('Upload your profile picture'):
-                    upload_img_me = gr.File(type='binary', file_types=["image"])
+                    shared.gradio['upload_img_me'] = gr.File(type='binary', file_types=["image"])
                 with gr.Tab('Upload TavernAI Character Card'):
-                    upload_img_tavern = gr.File(type='binary', file_types=["image"])
+                    shared.gradio['upload_img_tavern'] = gr.File(type='binary', file_types=["image"])

         with gr.Tab("Generation settings"):
             with gr.Row():
                 with gr.Column():
-                    max_new_tokens = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
+                    shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
                 with gr.Column():
-                    chat_prompt_size_slider = gr.Slider(minimum=shared.settings['chat_prompt_size_min'], maximum=shared.settings['chat_prompt_size_max'], step=1, label='Maximum prompt size in tokens', value=shared.settings['chat_prompt_size'])
-
-            preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping = create_settings_menus()
+                    shared.gradio['chat_prompt_size_slider'] = gr.Slider(minimum=shared.settings['chat_prompt_size_min'], maximum=shared.settings['chat_prompt_size_max'], step=1, label='Maximum prompt size in tokens', value=shared.settings['chat_prompt_size'])
+            create_settings_menus()

         if shared.args.extensions is not None:
             with gr.Tab("Extensions"):
                 extensions_module.create_extensions_block()

-        input_params = [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size_slider]
+        input_params = [shared.gradio[i] for i in ['textbox', 'max_new_tokens', 'do_sample', 'max_new_tokens', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping', 'name1', 'name2', 'context', 'check', 'chat_prompt_size_slider']]
         if shared.args.picture:
-            input_params.append(picture_select)
+            input_params.append(shared.gradio['picture_select'])
         function_call = "chat.cai_chatbot_wrapper" if shared.args.cai_chat else "chat.chatbot_wrapper"

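The comprehension `[shared.gradio[i] for i in [...]]` rebuilds the old positional input list from the registry; order still matters because Gradio maps the inputs list positionally onto the callback's parameters. That also explains why `'max_new_tokens'` appears twice: per the `generate_reply(question, tokens, do_sample, max_new_tokens, ...)` signature visible in the text_generation.py hunk above, the same slider feeds both the `tokens` and the `max_new_tokens` parameters. A reduced sketch:

```python
def generate_reply(question, tokens, do_sample, max_new_tokens):
    return (question, tokens, do_sample, max_new_tokens)

# Gradio passes the inputs list positionally into the callback, so one
# component may legitimately appear more than once: here the same slider
# value lands in both `tokens` and `max_new_tokens`.
gradio = {'textbox': 'prompt', 'max_new_tokens': 200, 'do_sample': True}
input_params = [gradio[i] for i in ['textbox', 'max_new_tokens', 'do_sample', 'max_new_tokens']]
print(generate_reply(*input_params))  # ('prompt', 200, True, 200)
```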
-        gen_events.append(buttons["Generate"].click(eval(function_call), input_params, display, show_progress=shared.args.no_stream, api_name="textgen"))
-        gen_events.append(textbox.submit(eval(function_call), input_params, display, show_progress=shared.args.no_stream))
+        gen_events.append(shared.gradio["Generate"].click(eval(function_call), input_params, shared.gradio['display'], show_progress=shared.args.no_stream, api_name="textgen"))
+        gen_events.append(shared.gradio['textbox'].submit(eval(function_call), input_params, shared.gradio['display'], show_progress=shared.args.no_stream))
         if shared.args.picture:
-            picture_select.upload(eval(function_call), input_params, display, show_progress=shared.args.no_stream)
-        gen_events.append(buttons["Regenerate"].click(chat.regenerate_wrapper, input_params, display, show_progress=shared.args.no_stream))
-        gen_events.append(buttons["Impersonate"].click(chat.impersonate_wrapper, input_params, textbox, show_progress=shared.args.no_stream))
-        buttons["Stop"].click(chat.stop_everything_event, [], [], cancels=gen_events)
+            shared.gradio['picture_select'].upload(eval(function_call), input_params, shared.gradio['display'], show_progress=shared.args.no_stream)
+        gen_events.append(shared.gradio["Regenerate"].click(chat.regenerate_wrapper, input_params, shared.gradio['display'], show_progress=shared.args.no_stream))
+        gen_events.append(shared.gradio["Impersonate"].click(chat.impersonate_wrapper, input_params, shared.gradio['textbox'], show_progress=shared.args.no_stream))
+        shared.gradio["Stop"].click(chat.stop_everything_event, [], [], cancels=gen_events)

-        buttons["Send last reply to input"].click(chat.send_last_reply_to_input, [], textbox, show_progress=shared.args.no_stream)
-        buttons["Replace last reply"].click(chat.replace_last_reply, [textbox, name1, name2], display, show_progress=shared.args.no_stream)
-        buttons["Clear history"].click(chat.clear_chat_log, [name1, name2], display)
-        buttons["Remove last"].click(chat.remove_last_message, [name1, name2], [display, textbox], show_progress=False)
-        buttons["Download"].click(chat.save_history, inputs=[], outputs=[download])
-        buttons["Upload character"].click(chat.upload_character, [upload_char, upload_img], [character_menu])
+        shared.gradio["Send last reply to input"].click(chat.send_last_reply_to_input, [], shared.gradio['textbox'], show_progress=shared.args.no_stream)
+        shared.gradio["Replace last reply"].click(chat.replace_last_reply, [shared.gradio['textbox'], shared.gradio['name1'], shared.gradio['name2']], shared.gradio['display'], show_progress=shared.args.no_stream)
+        shared.gradio["Clear history"].click(chat.clear_chat_log, [shared.gradio['name1'], shared.gradio['name2']], shared.gradio['display'])
+        shared.gradio["Remove last"].click(chat.remove_last_message, [shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['display'], shared.gradio['textbox']], show_progress=False)
+        shared.gradio["Download"].click(chat.save_history, inputs=[], outputs=[shared.gradio['download']])
+        shared.gradio["Upload character"].click(chat.upload_character, [shared.gradio['upload_char'], shared.gradio['upload_img']], [shared.gradio['character_menu']])

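Every generation handler above is appended to `gen_events` so a single Stop click can abort all in-flight jobs: `.click()` and `.submit()` return event references, and Gradio's `cancels=` parameter accepts a list of them. A minimal self-contained sketch of the idiom (assuming Gradio 3.x with event cancellation, and a queued app, since cancellation relies on the queue):

```python
import gradio as gr

with gr.Blocks() as demo:
    textbox = gr.Textbox(label='Input')
    output = gr.Textbox(label='Output')
    generate = gr.Button('Generate')
    stop = gr.Button('Stop')

    gen_events = []
    gen_events.append(generate.click(lambda s: s.upper(), textbox, output))
    gen_events.append(textbox.submit(lambda s: s.upper(), textbox, output))
    # fn=None: the Stop button runs no callback of its own, it only cancels
    stop.click(None, None, None, cancels=gen_events)

demo.queue()
# demo.launch()
```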
         # Clearing stuff and saving the history
         for i in ["Generate", "Regenerate", "Replace last reply"]:
-            buttons[i].click(lambda x: "", textbox, textbox, show_progress=False)
-            buttons[i].click(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
-        buttons["Clear history"].click(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
-        textbox.submit(lambda x: "", textbox, textbox, show_progress=False)
-        textbox.submit(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
+            shared.gradio[i].click(lambda x: "", shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False)
+            shared.gradio[i].click(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
+        shared.gradio["Clear history"].click(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
+        shared.gradio['textbox'].submit(lambda x: "", shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False)
+        shared.gradio['textbox'].submit(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)

-        character_menu.change(chat.load_character, [character_menu, name1, name2], [name2, context, display])
-        upload_chat_history.upload(chat.load_history, [upload_chat_history, name1, name2], [])
-        upload_img_tavern.upload(chat.upload_tavern_character, [upload_img_tavern, name1, name2], [character_menu])
-        upload_img_me.upload(chat.upload_your_profile_picture, [upload_img_me], [])
+        shared.gradio['character_menu'].change(chat.load_character, [shared.gradio['character_menu'], shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['name2'], shared.gradio['context'], shared.gradio['display']])
+        shared.gradio['upload_chat_history'].upload(chat.load_history, [shared.gradio['upload_chat_history'], shared.gradio['name1'], shared.gradio['name2']], [])
+        shared.gradio['upload_img_tavern'].upload(chat.upload_tavern_character, [shared.gradio['upload_img_tavern'], shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['character_menu']])
+        shared.gradio['upload_img_me'].upload(chat.upload_your_profile_picture, [shared.gradio['upload_img_me']], [])
         if shared.args.picture:
-            picture_select.upload(lambda : None, [], [picture_select], show_progress=False)
+            shared.gradio['picture_select'].upload(lambda : None, [], [shared.gradio['picture_select']], show_progress=False)

         reload_func = chat.redraw_html if shared.args.cai_chat else lambda : shared.history['visible']
-        reload_inputs = [name1, name2] if shared.args.cai_chat else []
-        upload_chat_history.upload(reload_func, reload_inputs, [display])
-        upload_img_me.upload(reload_func, reload_inputs, [display])
-        interface.load(reload_func, reload_inputs, [display], show_progress=True)
+        reload_inputs = [shared.gradio['name1'], shared.gradio['name2']] if shared.args.cai_chat else []
+        shared.gradio['upload_chat_history'].upload(reload_func, reload_inputs, [shared.gradio['display']])
+        shared.gradio['upload_img_me'].upload(reload_func, reload_inputs, [shared.gradio['display']])
+        shared.gradio['interface'].load(reload_func, reload_inputs, [shared.gradio['display']], show_progress=True)

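The "clearing stuff" loop above relies on one component carrying several independent handlers: each of Generate, Regenerate and "Replace last reply" gets one `.click()` that blanks the textbox and a second that persists the history, and both fire on every click. The usual late-binding-closure pitfall does not apply, since the lambdas never reference the loop variable `i`; it is only used as a dict key at wiring time. A reduced sketch (the `persist` helper is a stand-in, not project code):

```python
import gradio as gr

def persist():
    print('history saved')  # stand-in for chat.save_history(timestamp=False)

with gr.Blocks() as demo:
    textbox = gr.Textbox(label='Input')
    buttons = {name: gr.Button(name) for name in ['Generate', 'Regenerate']}
    for i in ['Generate', 'Regenerate']:
        # Two independent handlers per button; both run on each click.
        buttons[i].click(lambda x: '', textbox, textbox, show_progress=False)
        buttons[i].click(persist, [], [], show_progress=False)
```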
buttons["Stop"] = gr.Button("Stop") + shared.gradio["Generate"] = gr.Button("Generate") + shared.gradio["Stop"] = gr.Button("Stop") - max_new_tokens = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens']) - - preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping = create_settings_menus() + shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens']) + create_settings_menus() if shared.args.extensions is not None: extensions_module.create_extensions_block() - gen_events.append(buttons["Generate"].click(generate_reply, [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping], [textbox, markdown, html], show_progress=shared.args.no_stream, api_name="textgen")) - gen_events.append(textbox.submit(generate_reply, [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping], [textbox, markdown, html], show_progress=shared.args.no_stream)) - buttons["Stop"].click(None, None, None, cancels=gen_events) + input_params = [shared.gradio[k] for k in ('textbox', 'max_new_tokens', 'do_sample', 'max_new_tokens', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping')] + output_params = [shared.gradio[k] for k in ["textbox", "markdown", "html"]] + gen_events.append(shared.gradio["Generate"].click(generate_reply, input_params, output_params, show_progress=shared.args.no_stream, api_name="textgen")) + gen_events.append(shared.gradio['textbox'].submit(generate_reply, input_params, output_params, show_progress=shared.args.no_stream)) + shared.gradio["Stop"].click(None, None, None, cancels=gen_events) else: - with gr.Blocks(css=ui.css, analytics_enabled=False) as interface: + with gr.Blocks(css=ui.css, analytics_enabled=False) as shared.gradio['interface']: gr.Markdown(description) with gr.Row(): with gr.Column(): - textbox = gr.Textbox(value=default_text, lines=15, label='Input') - max_new_tokens = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens']) - buttons["Generate"] = gr.Button("Generate") + shared.gradio['textbox'] = gr.Textbox(value=default_text, lines=15, label='Input') + shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens']) + shared.gradio["Generate"] = gr.Button("Generate") with gr.Row(): with gr.Column(): - buttons["Continue"] = gr.Button("Continue") + shared.gradio["Continue"] = gr.Button("Continue") with gr.Column(): - buttons["Stop"] = gr.Button("Stop") + shared.gradio["Stop"] = gr.Button("Stop") - preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, 
-interface.queue()
+shared.gradio['interface'].queue()
 if shared.args.listen:
-    interface.launch(prevent_thread_lock=True, share=shared.args.share, server_name="0.0.0.0", server_port=shared.args.listen_port)
+    shared.gradio['interface'].launch(prevent_thread_lock=True, share=shared.args.share, server_name="0.0.0.0", server_port=shared.args.listen_port)
 else:
-    interface.launch(prevent_thread_lock=True, share=shared.args.share, server_port=shared.args.listen_port)
+    shared.gradio['interface'].launch(prevent_thread_lock=True, share=shared.args.share, server_port=shared.args.listen_port)

 # I think that I will need this later
 while True: