diff --git a/css/chat_style-TheEncrypted777.css b/css/chat_style-TheEncrypted777.css
new file mode 100644
index 00000000..cac8015f
--- /dev/null
+++ b/css/chat_style-TheEncrypted777.css
@@ -0,0 +1,137 @@
+/* All credits to TheEncrypted777: https://www.reddit.com/r/Oobabooga/comments/12xe6vq/updated_css_styling_with_color_customization_for/ */
+
+.chat {
+  margin-left: auto;
+  margin-right: auto;
+  max-width: 800px;
+  height: calc(100vh - 300px);
+  overflow-y: auto;
+  padding-right: 20px;
+  display: flex;
+  flex-direction: column-reverse;
+  word-break: break-word;
+  overflow-wrap: anywhere;
+}
+
+.message {
+  display: grid;
+  grid-template-columns: 60px minmax(0, 1fr);
+  padding-bottom: 28px;
+  font-size: 18px;
+  /* Change 'Quicksand' to a font you like or leave it */
+  font-family: Quicksand, Arial, sans-serif;
+  line-height: 1.428571429;
+}
+
+.circle-you {
+  background-color: gray;
+  border-radius: 1rem;
+  /* Change the color to any you like for the border of your image */
+  border: 2px solid white;
+}
+
+.circle-bot {
+  background-color: gray;
+  border-radius: 1rem;
+  /* Change the color to any you like for the border of the bot's image */
+  border: 2px solid white;
+}
+
+.circle-bot img,
+.circle-you img {
+  border-radius: 10%;
+  width: 100%;
+  height: 100%;
+  object-fit: cover;
+}
+
+.circle-you, .circle-bot {
+  /* You can set the size of the profile images here. If you do, also adjust the padding-left value in .text below to match the new image width */
+  width: 135px;
+  height: 175px;
+}
+
+.text {
+  /* Change this to move the message box further left or right depending on the size of your profile pic */
+  padding-left: 90px;
+  text-shadow: 2px 2px 2px rgb(0, 0, 0);
+}
+
+.text p {
+  margin-top: 2px;
+}
+
+.username {
+  padding-left: 10px;
+  font-size: 22px;
+  font-weight: bold;
+  border-top: 1px solid rgb(51, 64, 90);
+  padding: 3px;
+}
+
+.message-body {
+  position: relative;
+  border-radius: 1rem;
+  border: 1px solid rgba(255, 255, 255, 0.459);
+  border-radius: 10px;
+  padding: 10px;
+  padding-top: 5px;
+  /* Message gradient background color - remove the line below if you don't want a background color or gradient */
+  background: linear-gradient(to bottom, #171730, #1b263f);
+}
+
+/* Adds 2 extra lines at the top and bottom of the message */
+.message-body:before,
+.message-body:after {
+  content: "";
+  position: absolute;
+  left: 10px;
+  right: 10px;
+  height: 1px;
+  background-color: rgba(255, 255, 255, 0.13);
+}
+
+.message-body:before {
+  top: 6px;
+}
+
+.message-body:after {
+  bottom: 6px;
+}
+
+.message-body img {
+  max-width: 300px;
+  max-height: 300px;
+  border-radius: 20px;
+}
+
+.message-body p {
+  margin-bottom: 0 !important;
+  font-size: 18px !important;
+  line-height: 1.428571429 !important;
+}
+
+.message-body li {
+  margin-top: 0.5em !important;
+  margin-bottom: 0.5em !important;
+}
+
+.message-body li > p {
+  display: inline !important;
+}
+
+.message-body code {
+  overflow-x: auto;
+}
+
+.message-body :not(pre) > code {
+  white-space: normal !important;
+}
+
+.dark .message-body p em {
+  color: rgb(138, 138, 138) !important;
+}
+
+.message-body p em {
+  color: rgb(110, 110, 110) !important;
+}
diff --git a/css/html_cai_style.css b/css/chat_style-cai-chat.css
similarity index 100%
rename from css/html_cai_style.css
rename to css/chat_style-cai-chat.css
diff --git a/css/html_bubble_chat_style.css b/css/chat_style-wpp.css
similarity index 100%
rename from css/html_bubble_chat_style.css
rename to css/chat_style-wpp.css
diff --git a/docs/Custom-chat-characters.md b/docs/Chat-mode.md
similarity index 78%
rename from docs/Custom-chat-characters.md
rename to docs/Chat-mode.md
index eeb22d1c..08dd290d 100644
--- a/docs/Custom-chat-characters.md
+++ b/docs/Chat-mode.md
@@ -1,3 +1,5 @@
+## Chat characters
+
 Custom chat mode characters are defined by `.yaml` files inside the `characters` folder. An example is included: [Example.yaml](https://github.com/oobabooga/text-generation-webui/blob/main/characters/Example.yaml)
 
 The following fields may be defined:
@@ -28,4 +30,16 @@ Once your prompt reaches the 2048 token limit, old messages will be removed one
 
 #### Pygmalion format characters
 
-These are also supported out of the box. Simply put the JSON file in the `characters` folder, or upload it directly from the web UI by clicking on the "Upload character" tab at the bottom.
\ No newline at end of file
+These are also supported out of the box. Simply put the JSON file in the `characters` folder, or upload it directly from the web UI by clicking on the "Upload character" tab at the bottom.
+
+## Chat styles
+
+Custom chat styles can be defined in the `text-generation-webui/css` folder. Simply create a new file with a name that starts with `chat_style-` and ends in `.css`, and it will automatically appear in the "Chat style" dropdown menu in the interface. Examples:
+
+```
+chat_style-cai-chat.css
+chat_style-TheEncrypted777.css
+chat_style-wpp.css
+```
+
+You should use the same class names as in `chat_style-cai-chat.css` in your custom style.
\ No newline at end of file
diff --git a/docs/README.md b/docs/README.md
index f43efa68..79dd8b3f 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -2,7 +2,7 @@
 
 ## Table of contents
 
-* [Custom-chat-characters](Custom-chat-characters.md)
+* [Custom-chat-characters](Chat-mode.md)
 * [Docker Compose](Docker.md)
 * [DeepSpeed](DeepSpeed.md)
 * [Extensions](Extensions.md)
diff --git a/extensions/elevenlabs_tts/script.py b/extensions/elevenlabs_tts/script.py
index 03afdc2f..eeff03eb 100644
--- a/extensions/elevenlabs_tts/script.py
+++ b/extensions/elevenlabs_tts/script.py
@@ -31,14 +31,14 @@ def refresh_voices_dd():
     return gr.Dropdown.update(value=all_voices[0], choices=all_voices)
 
 
-def remove_tts_from_history(name1, name2, mode):
+def remove_tts_from_history(name1, name2, mode, style):
     for i, entry in enumerate(shared.history['internal']):
         shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
-def toggle_text_in_history(name1, name2, mode):
+def toggle_text_in_history(name1, name2, mode, style):
     for i, entry in enumerate(shared.history['visible']):
         visible_reply = entry[1]
         if visible_reply.startswith('<audio'):
                 shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"
             ]
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
 def remove_surrounded_chars(string):
@@ -161,7 +161,7 @@ def ui():
         gr.update(visible=False)], None, convert_arr
     )
     convert_confirm.click(
-        remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode']], shared.gradio['display']
+        remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display']
    )
     convert_confirm.click(chat.save_history, shared.gradio['mode'], [], show_progress=False)
     convert_cancel.click(
@@ -178,7 +178,7 @@ def ui():
     # Toggle message text in history
     show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
     show_text.change(
-        toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode']], shared.gradio['display']
+        toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display']
     )
     show_text.change(chat.save_history, shared.gradio['mode'], [], show_progress=False)
 
     # Event functions to update the parameters in the backend
diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py
index 7babc094..136f3f6f 100644
--- a/extensions/silero_tts/script.py
+++ b/extensions/silero_tts/script.py
@@ -57,13 +57,14 @@ def load_model():
     return model
 
 
-def remove_tts_from_history(name1, name2, mode):
+def remove_tts_from_history(name1, name2, mode, style):
     for i, entry in enumerate(shared.history['internal']):
         shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
+
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
-def toggle_text_in_history(name1, name2, mode):
+def toggle_text_in_history(name1, name2, mode, style):
     for i, entry in enumerate(shared.history['visible']):
         visible_reply = entry[1]
         if visible_reply.startswith('<audio'):
                 shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"]
             else:
                 shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"]
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
+
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
 def state_modifier(state):
@@ -167,13 +169,13 @@ def ui():
     convert_arr = [convert_confirm, convert, convert_cancel]
     convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
     convert_confirm.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
-    convert_confirm.click(remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode']], shared.gradio['display'])
+    convert_confirm.click(remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'])
     convert_confirm.click(chat.save_history, shared.gradio['mode'], [], show_progress=False)
     convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
 
     # Toggle message text in history
     show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
-    show_text.change(toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode']], shared.gradio['display'])
+    show_text.change(toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'])
     show_text.change(chat.save_history, shared.gradio['mode'], [], show_progress=False)
 
     # Event functions to update the parameters in the backend
diff --git a/models/config.yaml b/models/config.yaml
index 9dd58ad4..7690d527 100644
--- a/models/config.yaml
+++ b/models/config.yaml
@@ -3,7 +3,7 @@
   model_type: 'None'
   groupsize: 'None'
   pre_layer: 0
-  mode: 'cai-chat'
+  mode: 'chat'
   skip_special_tokens: true
   custom_stopping_strings: ''
 llama-[0-9]*b-4bit$:
diff --git a/modules/chat.py b/modules/chat.py
index 0657cfba..f98f5249 100644
--- a/modules/chat.py
+++ b/modules/chat.py
@@ -251,33 +251,33 @@ def impersonate_wrapper(text, state):
 
 def cai_chatbot_wrapper(text, state):
     for history in chatbot_wrapper(text, state):
-        yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'])
+        yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
 
 
 def regenerate_wrapper(text, state):
     if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
-        yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'])
+        yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'], state['chat_style'])
     else:
         for history in chatbot_wrapper('', state, regenerate=True):
-            yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'])
+            yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
 
 
 def continue_wrapper(text, state):
     if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
-        yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'])
+        yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'], state['chat_style'])
     else:
         for history in chatbot_wrapper('', state, _continue=True):
-            yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'])
+            yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
 
 
-def remove_last_message(name1, name2, mode):
+def remove_last_message(name1, name2, mode, style):
     if len(shared.history['visible']) > 0 and shared.history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>':
         last = shared.history['visible'].pop()
         shared.history['internal'].pop()
     else:
         last = ['', '']
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode), last[0]
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style), last[0]
 
 
 def send_last_reply_to_input():
@@ -287,35 +287,35 @@
     return ''
 
 
-def replace_last_reply(text, name1, name2, mode):
+def replace_last_reply(text, name1, name2, mode, style):
     if len(shared.history['visible']) > 0:
         shared.history['visible'][-1][1] = text
         shared.history['internal'][-1][1] = apply_extensions("input", text)
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
-def send_dummy_message(text, name1, name2, mode):
+def send_dummy_message(text, name1, name2, mode, style):
     shared.history['visible'].append([text, ''])
     shared.history['internal'].append([apply_extensions("input", text), ''])
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
-def send_dummy_reply(text, name1, name2, mode):
+def send_dummy_reply(text, name1, name2, mode, style):
     if len(shared.history['visible']) > 0 and not shared.history['visible'][-1][1] == '':
         shared.history['visible'].append(['', ''])
         shared.history['internal'].append(['', ''])
 
     shared.history['visible'][-1][1] = text
     shared.history['internal'][-1][1] = apply_extensions("input", text)
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
 def clear_html():
     return chat_html_wrapper([], "", "")
 
 
-def clear_chat_log(name1, name2, greeting, mode):
+def clear_chat_log(name1, name2, greeting, mode, style):
     shared.history['visible'] = []
     shared.history['internal'] = []
@@ -325,14 +325,14 @@ def clear_chat_log(name1, name2, greeting, mode):
     # Save cleared logs
     save_history(mode)
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
-def redraw_html(name1, name2, mode):
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
+def redraw_html(name1, name2, mode, style):
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
-def tokenize_dialogue(dialogue, name1, name2, mode):
+def tokenize_dialogue(dialogue, name1, name2, mode, style):
     history = []
     messages = []
     dialogue = re.sub('<START>', '', dialogue)
@@ -440,7 +440,7 @@ def generate_pfp_cache(character):
     return None
 
 
-def load_character(character, name1, name2, mode):
+def load_character(character, name1, name2, mode, style):
     shared.character = character
     context = greeting = turn_template = ""
     greeting_field = 'greeting'
@@ -514,7 +514,7 @@ def load_character(character, name1, name2, mode):
     # Create .json log files since they don't already exist
     save_history(mode)
 
-    return name1, name2, picture, greeting, context, repr(turn_template)[1:-1], chat_html_wrapper(shared.history['visible'], name1, name2, mode)
+    return name1, name2, picture, greeting, context, repr(turn_template)[1:-1], chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
 def upload_character(json_file, img, tavern=False):
@@ -549,7 +549,7 @@ def upload_tavern_character(img, name1, name2):
     return upload_character(json.dumps(_json), img, tavern=True)
 
 
-def upload_your_profile_picture(img, name1, name2, mode):
+def upload_your_profile_picture(img, name1, name2, mode, style):
     cache_folder = Path("cache")
     if not cache_folder.exists():
         cache_folder.mkdir()
@@ -562,4 +562,4 @@
         img.save(Path('cache/pfp_me.png'))
         logging.info('Profile picture saved to "cache/pfp_me.png"')
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, reset_cache=True)
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style, reset_cache=True)
diff --git a/modules/html_generator.py b/modules/html_generator.py
index 5b01719f..8fead018 100644
--- a/modules/html_generator.py
+++ b/modules/html_generator.py
@@ -12,6 +12,8 @@ from pathlib import Path
 import markdown
 from PIL import Image, ImageOps
 
+from modules.utils import get_available_chat_styles
+
 # This is to store the paths to the thumbnails of the profile pictures
 image_cache = {}
 
@@ -19,13 +21,14 @@ with open(Path(__file__).resolve().parent / '../css/html_readable_style.css', 'r
     readable_css = f.read()
 with open(Path(__file__).resolve().parent / '../css/html_4chan_style.css', 'r') as css_f:
     _4chan_css = css_f.read()
-with open(Path(__file__).resolve().parent / '../css/html_cai_style.css', 'r') as f:
-    cai_css = f.read()
-with open(Path(__file__).resolve().parent / '../css/html_bubble_chat_style.css', 'r') as f:
-    bubble_chat_css = f.read()
 with open(Path(__file__).resolve().parent / '../css/html_instruct_style.css', 'r') as f:
     instruct_css = f.read()
 
+# Custom chat styles
+chat_styles = {}
+for k in get_available_chat_styles():
+    chat_styles[k] = open(Path(f'css/chat_style-{k}.css'), 'r').read()
+
 
 def fix_newlines(string):
     string = string.replace('\n', '\n\n')
@@ -185,8 +188,8 @@
     return output
 
 
-def generate_cai_chat_html(history, name1, name2, reset_cache=False):
-    output = f'<style>{cai_css}</style><div class="chat" id="chat">'
+def generate_cai_chat_html(history, name1, name2, style, reset_cache=False):
+    output = f'<style>{chat_styles[style]}</style><div class="chat" id="chat">'
 
     # We use ?name2 and ?time.time() to force the browser to reset caches
     img_bot = f'<img src="file/cache/pfp_character.png?{name2}">' if Path("cache/pfp_character.png").exists() else ''
@@ -235,7 +238,7 @@ def generate_cai_chat_html(history, name1, name2, reset_cache=False):
 
 
 def generate_chat_html(history, name1, name2, reset_cache=False):
-    output = f'<style>{bubble_chat_css}</style><div class="chat" id="chat">'
+    output = f'<style>{chat_styles["wpp"]}</style><div class="chat" id="chat">'
 
     for i, _row in enumerate(history[::-1]):
         row = [convert_to_markdown(entry) for entry in _row]
@@ -267,12 +270,10 @@ def generate_chat_html(history, name1, name2, reset_cache=False):
     return output
 
 
-def chat_html_wrapper(history, name1, name2, mode, reset_cache=False):
-    if mode == "cai-chat":
-        return generate_cai_chat_html(history, name1, name2, reset_cache)
-    elif mode == "chat":
-        return generate_chat_html(history, name1, name2)
-    elif mode == "instruct":
+def chat_html_wrapper(history, name1, name2, mode, style, reset_cache=False):
+    if mode == 'instruct':
         return generate_instruct_html(history)
+    elif style == 'wpp':
+        return generate_chat_html(history, name1, name2)
     else:
-        return ''
+        return generate_cai_chat_html(history, name1, name2, style, reset_cache)
diff --git a/modules/shared.py b/modules/shared.py
index 70fa611f..fd494b9c 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -49,7 +49,8 @@ settings = {
     'truncation_length': 2048,
     'truncation_length_min': 0,
     'truncation_length_max': 8192,
-    'mode': 'cai-chat',
+    'mode': 'chat',
+    'chat_style': 'cai-chat',
     'instruction_template': 'None',
     'chat_prompt_size': 2048,
     'chat_prompt_size_min': 0,
@@ -95,7 +96,6 @@ parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpForma
 # Basic settings
 parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
 parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode with a style similar to the Character.AI website.')
-parser.add_argument('--cai-chat', action='store_true', help='DEPRECATED: use --chat instead.')
 parser.add_argument('--character', type=str, help='The name of the character to load in chat mode by default.')
 parser.add_argument('--model', type=str, help='Name of the model to load by default.')
 parser.add_argument('--lora', type=str, nargs="+", help='The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.')
@@ -176,11 +176,6 @@ for k in deprecated_dict:
     logging.warning(f"--{k} is deprecated and will be removed. Use --{deprecated_dict[k][0]} instead.")
     setattr(args, deprecated_dict[k][0], getattr(args, k))
 
-# Deprecation warnings for parameters that have been removed
-if args.cai_chat:
-    logging.warning("--cai-chat is deprecated. Use --chat instead.")
-    args.chat = True
-
 # Security warnings
 if args.trust_remote_code:
     logging.warning("trust_remote_code is enabled. This is dangerous.")
diff --git a/modules/ui.py b/modules/ui.py
index d8932928..7560c754 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -36,7 +36,7 @@ def list_model_elements():
 def list_interface_input_elements(chat=False):
     elements = ['max_new_tokens', 'seed', 'temperature', 'top_p', 'top_k', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'no_repeat_ngram_size', 'min_length', 'do_sample', 'penalty_alpha', 'num_beams', 'length_penalty', 'early_stopping', 'add_bos_token', 'ban_eos_token', 'truncation_length', 'custom_stopping_strings', 'skip_special_tokens', 'preset_menu', 'stream']
     if chat:
-        elements += ['name1', 'name2', 'greeting', 'context', 'chat_prompt_size', 'chat_generation_attempts', 'stop_at_newline', 'mode', 'instruction_template', 'character_menu', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template']
+        elements += ['name1', 'name2', 'greeting', 'context', 'chat_prompt_size', 'chat_generation_attempts', 'stop_at_newline', 'mode', 'instruction_template', 'character_menu', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template', 'chat_style']
 
     elements += list_model_elements()
     return elements
diff --git a/modules/utils.py b/modules/utils.py
index 79969340..79cbac32 100644
--- a/modules/utils.py
+++ b/modules/utils.py
@@ -59,3 +59,7 @@ def get_available_loras():
 
 def get_datasets(path: str, ext: str):
     return ['None'] + sorted(set([k.stem for k in Path(path).glob(f'*.{ext}') if k.stem != 'put-trainer-datasets-here']), key=natural_keys)
+
+
+def get_available_chat_styles():
+    return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys)
diff --git a/server.py b/server.py
index 358fd2fd..7382ffcd 100644
--- a/server.py
+++ b/server.py
@@ -481,7 +481,7 @@ def create_interface():
         is_instruct = shared.settings['mode'] == 'instruct'
 
         with gr.Tab('Text generation', elem_id='main'):
-            shared.gradio['display'] = gr.HTML(value=chat_html_wrapper(shared.history['visible'], shared.settings['name1'], shared.settings['name2'], 'cai-chat'))
+            shared.gradio['display'] = gr.HTML(value=chat_html_wrapper(shared.history['visible'], shared.settings['name1'], shared.settings['name2'], 'chat', 'cai-chat'))
             shared.gradio['textbox'] = gr.Textbox(label='Input')
             with gr.Row():
                 shared.gradio['Stop'] = gr.Button('Stop', elem_id='stop')
@@ -504,8 +504,12 @@
                     shared.gradio['Clear history-confirm'] = gr.Button('Confirm', variant='stop', visible=False)
                     shared.gradio['Clear history-cancel'] = gr.Button('Cancel', visible=False)
 
-                shared.gradio['mode'] = gr.Radio(choices=['cai-chat', 'chat', 'instruct'], value=shared.settings['mode'], label='Mode')
-                shared.gradio['instruction_template'] = gr.Dropdown(choices=utils.get_available_instruction_templates(), label='Instruction template', value='None', visible=is_instruct, info='Change this according to the model/LoRA that you are using.')
+                with gr.Row():
+                    with gr.Column():
+                        shared.gradio['mode'] = gr.Radio(choices=['chat', 'instruct'], value=shared.settings['mode'] if shared.settings['mode'] in ['chat', 'instruct'] else 'chat', label='Mode')
+                    with gr.Column():
+                        shared.gradio['instruction_template'] = gr.Dropdown(choices=utils.get_available_instruction_templates(), label='Instruction template', value='None', visible=is_instruct, info='Change this according to the model/LoRA that you are using.')
+                        shared.gradio['chat_style'] = gr.Dropdown(choices=utils.get_available_chat_styles(), label='Chat style', value=shared.settings['chat_style'], visible=not is_instruct)
 
         with gr.Tab('Character', elem_id='chat-settings'):
             with gr.Row():
@@ -654,12 +658,13 @@
 
         # Interface mode tab
         with gr.Tab("Interface mode", elem_id="interface-mode"):
-            modes = ["default", "notebook", "chat", "cai_chat"]
+            modes = ["default", "notebook", "chat"]
             current_mode = "default"
             for mode in modes[1:]:
                 if getattr(shared.args, mode):
                     current_mode = mode
                     break
+
             cmd_list = vars(shared.args)
             bool_list = [k for k in cmd_list if type(cmd_list[k]) is bool and k not in modes + ui.list_model_elements()]
             bool_active = [k for k in bool_list if vars(shared.args)[k]]
@@ -679,7 +684,7 @@
     if shared.is_chat():
         shared.input_params = [shared.gradio[k] for k in ['Chat input', 'interface_state']]
         clear_arr = [shared.gradio[k] for k in ['Clear history-confirm', 'Clear history', 'Clear history-cancel']]
-        reload_inputs = [shared.gradio[k] for k in ['name1', 'name2', 'mode']]
+        reload_inputs = [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']]
 
         gen_events.append(shared.gradio['Generate'].click(
             ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
@@ -713,23 +718,23 @@
         )
 
         shared.gradio['Replace last reply'].click(
-            chat.replace_last_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode']], shared.gradio['display'], show_progress=False).then(
+            chat.replace_last_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'], show_progress=False).then(
             lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then(
             chat.save_history, shared.gradio['mode'], None, show_progress=False)
 
         shared.gradio['Send dummy message'].click(
-            chat.send_dummy_message, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode']], shared.gradio['display'], show_progress=False).then(
+            chat.send_dummy_message, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'], show_progress=False).then(
            lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then(
            chat.save_history, shared.gradio['mode'], None, show_progress=False)
 
         shared.gradio['Send dummy reply'].click(
-            chat.send_dummy_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode']], shared.gradio['display'], show_progress=False).then(
+            chat.send_dummy_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'], show_progress=False).then(
             lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then(
             chat.save_history, shared.gradio['mode'], None, show_progress=False)
 
         shared.gradio['Clear history-confirm'].click(
             lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr).then(
-            chat.clear_chat_log, [shared.gradio[k] for k in ['name1', 'name2', 'greeting', 'mode']], shared.gradio['display']).then(
+            chat.clear_chat_log, [shared.gradio[k] for k in ['name1', 'name2', 'greeting', 'mode', 'chat_style']], shared.gradio['display']).then(
             chat.save_history, shared.gradio['mode'], None, show_progress=False)
 
         shared.gradio['Stop'].click(
@@ -737,12 +742,13 @@
             chat.redraw_html, reload_inputs, shared.gradio['display'])
 
         shared.gradio['mode'].change(
-            lambda x: [gr.update(visible=x == 'instruct')] * 5 + [gr.update(visible=x != 'instruct')] * 4, shared.gradio['mode'], [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template', 'name1', 'name2', 'context', 'greeting']]).then(
+            lambda x: [gr.update(visible=x == 'instruct')] * 5 + [gr.update(visible=x != 'instruct')] * 5, shared.gradio['mode'], [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template', 'name1', 'name2', 'context', 'greeting', 'chat_style']], show_progress=False).then(
             lambda x: gr.update(interactive=x != 'instruct'), shared.gradio['mode'], shared.gradio['character_menu']).then(
             chat.redraw_html, reload_inputs, shared.gradio['display'])
 
+        shared.gradio['chat_style'].change(chat.redraw_html, reload_inputs, shared.gradio['display'])
         shared.gradio['instruction_template'].change(
-            chat.load_character, [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'mode']], [shared.gradio[k] for k in ['name1_instruct', 'name2_instruct', 'dummy', 'dummy', 'context_instruct', 'turn_template', 'display']]).then(
+            chat.load_character, [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'mode', 'chat_style']], [shared.gradio[k] for k in ['name1_instruct', 'name2_instruct', 'dummy', 'dummy', 'context_instruct', 'turn_template', 'display']]).then(
             chat.redraw_html, reload_inputs, shared.gradio['display'])
 
         shared.gradio['upload_chat_history'].upload(
@@ -752,12 +758,12 @@
         shared.gradio['Copy last reply'].click(chat.send_last_reply_to_input, None, shared.gradio['textbox'], show_progress=False)
         shared.gradio['Clear history'].click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, clear_arr)
         shared.gradio['Clear history-cancel'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr)
-        shared.gradio['Remove last'].click(chat.remove_last_message, [shared.gradio[k] for k in ['name1', 'name2', 'mode']], [shared.gradio['display'], shared.gradio['textbox']], show_progress=False)
+        shared.gradio['Remove last'].click(chat.remove_last_message, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], [shared.gradio['display'], shared.gradio['textbox']], show_progress=False)
 
         shared.gradio['download_button'].click(lambda x: chat.save_history(x, timestamp=True), shared.gradio['mode'], shared.gradio['download'])
         shared.gradio['Upload character'].click(chat.upload_character, [shared.gradio['upload_json'], shared.gradio['upload_img_bot']], [shared.gradio['character_menu']])
-        shared.gradio['character_menu'].change(chat.load_character, [shared.gradio[k] for k in ['character_menu', 'name1', 'name2', 'mode']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'turn_template', 'display']])
+        shared.gradio['character_menu'].change(chat.load_character, [shared.gradio[k] for k in ['character_menu', 'name1', 'name2', 'mode', 'chat_style']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'turn_template', 'display']])
         shared.gradio['upload_img_tavern'].upload(chat.upload_tavern_character, [shared.gradio['upload_img_tavern'], shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['character_menu']])
-        shared.gradio['your_picture'].change(chat.upload_your_profile_picture, [shared.gradio[k] for k in ['your_picture', 'name1', 'name2', 'mode']], shared.gradio['display'])
+        shared.gradio['your_picture'].change(chat.upload_your_profile_picture, [shared.gradio[k] for k in ['your_picture', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'])
 
         shared.gradio['interface'].load(None, None, None, _js=f"() => {{{ui.main_js+ui.chat_js}}}")
 
         # notebook/default modes event handlers
diff --git a/settings-template.json b/settings-template.json
index 9465d799..ebf751d7 100644
--- a/settings-template.json
+++ b/settings-template.json
@@ -17,7 +17,8 @@
     "truncation_length": 2048,
     "truncation_length_min": 0,
     "truncation_length_max": 8192,
-    "mode": "cai-chat",
+    "mode": "chat",
+    "chat_style": "cai-chat",
     "instruction_template": "None",
     "chat_prompt_size": 2048,
     "chat_prompt_size_min": 0,
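
For reference, here is a minimal sketch of what a user-defined style following the convention introduced in `docs/Chat-mode.md` above could look like. It is illustrative only and not part of the patch: the file name `chat_style-example.css` and all property values are hypothetical, while the class names are the ones shared by the styles shipped in this diff (`.chat`, `.message`, `.circle-you`, `.circle-bot`, `.username`, `.message-body`).

```css
/* css/chat_style-example.css — hypothetical example, not included in this patch.
   Reuses the class names from chat_style-cai-chat.css, as docs/Chat-mode.md recommends. */
.chat {
  margin-left: auto;
  margin-right: auto;
  max-width: 800px;
  height: calc(100vh - 300px);
  overflow-y: auto;
}

.message {
  display: grid;
  grid-template-columns: 60px 1fr;
  padding-bottom: 25px;
  font-size: 16px;
}

/* Profile picture containers for the user and the bot */
.circle-you, .circle-bot {
  width: 50px;
  height: 50px;
  border-radius: 50%;
}

.username {
  font-weight: bold;
}

.message-body p {
  margin-bottom: 0;
}
```

Dropping such a file into `text-generation-webui/css` would be picked up by the new `get_available_chat_styles()` helper in `modules/utils.py`, so a style named `example` would appear in the "Chat style" dropdown without any further code changes.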