Add support for custom chat styles (#1917)

This commit is contained in:
oobabooga 2023-05-08 12:35:03 -03:00 committed by GitHub
parent b040b4110d
commit b5260b24f1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 234 additions and 74 deletions

View File

@ -0,0 +1,137 @@
/* All credits to TheEncrypted777: https://www.reddit.com/r/Oobabooga/comments/12xe6vq/updated_css_styling_with_color_customization_for/ */
.chat {
  margin-left: auto;
  margin-right: auto;
  max-width: 800px;
  height: calc(100vh - 300px);
  overflow-y: auto;
  padding-right: 20px;
  display: flex;
  flex-direction: column-reverse;
  word-break: break-word;
  overflow-wrap: anywhere;
}
.message {
  display: grid;
  grid-template-columns: 60px minmax(0, 1fr);
  padding-bottom: 28px;
  font-size: 18px;
  /* Change 'Quicksand' to a font you like or leave it */
  font-family: Quicksand, Arial, sans-serif;
  line-height: 1.428571429;
}
.circle-you {
  background-color: gray;
  border-radius: 1rem;
  /* Change color to any you like to be the border of your image */
  border: 2px solid white;
}
.circle-bot {
  background-color: gray;
  border-radius: 1rem;
  /* Change color to any you like to be the border of the bot's image */
  border: 2px solid white;
}
.circle-bot img,
.circle-you img {
  border-radius: 10%;
  width: 100%;
  height: 100%;
  object-fit: cover;
}
.circle-you, .circle-bot {
  /* You can set the size of the profile images here, but if you do, you have to also adjust the .text{padding-left: 90px} to a different number according to the width of the image which is right below here */
  width: 135px;
  height: 175px;
}
.text {
  /* Change this to move the message box further left or right depending on the size of your profile pic */
  padding-left: 90px;
  text-shadow: 2px 2px 2px rgb(0, 0, 0);
}
.text p {
  margin-top: 2px;
}
.username {
  font-size: 22px;
  font-weight: bold;
  border-top: 1px solid rgb(51, 64, 90);
  /* 10px on the left so the name clears the border; 3px elsewhere.
     (Written as one shorthand so the left padding isn't overridden.) */
  padding: 3px 3px 3px 10px;
}
.message-body {
  position: relative;
  /* Single border-radius declaration; a duplicate 1rem value was dead code. */
  border: 1px solid rgba(255, 255, 255, 0.459);
  border-radius: 10px;
  padding: 10px;
  padding-top: 5px;
  /* Message gradient background color - remove the line below if you don't want a background color or gradient */
  background: linear-gradient(to bottom, #171730, #1b263f);
}
/* Adds 2 extra lines at the top and bottom of the message */
.message-body:before,
.message-body:after {
  content: "";
  position: absolute;
  left: 10px;
  right: 10px;
  height: 1px;
  background-color: rgba(255, 255, 255, 0.13);
}
.message-body:before {
  top: 6px;
}
.message-body:after {
  bottom: 6px;
}
.message-body img {
  max-width: 300px;
  max-height: 300px;
  border-radius: 20px;
}
.message-body p {
  margin-bottom: 0 !important;
  font-size: 18px !important;
  line-height: 1.428571429 !important;
}
.message-body li {
  margin-top: 0.5em !important;
  margin-bottom: 0.5em !important;
}
.message-body li > p {
  display: inline !important;
}
.message-body code {
  overflow-x: auto;
}
.message-body :not(pre) > code {
  white-space: normal !important;
}
.dark .message-body p em {
  color: rgb(138, 138, 138) !important;
}
.message-body p em {
  color: rgb(110, 110, 110) !important;
}

View File

@ -1,3 +1,5 @@
## Chat characters
Custom chat mode characters are defined by `.yaml` files inside the `characters` folder. An example is included: [Example.yaml](https://github.com/oobabooga/text-generation-webui/blob/main/characters/Example.yaml)
The following fields may be defined:
@ -28,4 +30,16 @@ Once your prompt reaches the 2048 token limit, old messages will be removed one
#### Pygmalion format characters
These are also supported out of the box. Simply put the JSON file in the `characters` folder, or upload it directly from the web UI by clicking on the "Upload character" tab at the bottom.
## Chat styles
Custom chat styles can be defined in the `text-generation-webui/css` folder. Simply create a new file with name starting in `chat_style-` and ending in `.css` and it will automatically appear in the "Chat style" dropdown menu in the interface. Examples:
```
chat_style-cai-chat.css
chat_style-TheEncrypted777.css
chat_style-wpp.css
```
You should use the same class names as in `chat_style-cai-chat.css` in your custom style.

View File

@ -2,7 +2,7 @@
## Table of contents
* [Custom-chat-characters](Chat-mode.md)
* [Docker Compose](Docker.md)
* [DeepSpeed](DeepSpeed.md)
* [Extensions](Extensions.md)

View File

@ -31,14 +31,14 @@ def refresh_voices_dd():
return gr.Dropdown.update(value=all_voices[0], choices=all_voices) return gr.Dropdown.update(value=all_voices[0], choices=all_voices)
def remove_tts_from_history(name1, name2, mode): def remove_tts_from_history(name1, name2, mode, style):
for i, entry in enumerate(shared.history['internal']): for i, entry in enumerate(shared.history['internal']):
shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]] shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]
return chat_html_wrapper(shared.history['visible'], name1, name2, mode) return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
def toggle_text_in_history(name1, name2, mode): def toggle_text_in_history(name1, name2, mode, style):
for i, entry in enumerate(shared.history['visible']): for i, entry in enumerate(shared.history['visible']):
visible_reply = entry[1] visible_reply = entry[1]
if visible_reply.startswith('<audio'): if visible_reply.startswith('<audio'):
@ -52,7 +52,7 @@ def toggle_text_in_history(name1, name2, mode):
shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>" shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"
] ]
return chat_html_wrapper(shared.history['visible'], name1, name2, mode) return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
def remove_surrounded_chars(string): def remove_surrounded_chars(string):
@ -161,7 +161,7 @@ def ui():
gr.update(visible=False)], None, convert_arr gr.update(visible=False)], None, convert_arr
) )
convert_confirm.click( convert_confirm.click(
remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode']], shared.gradio['display'] remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display']
) )
convert_confirm.click(chat.save_history, shared.gradio['mode'], [], show_progress=False) convert_confirm.click(chat.save_history, shared.gradio['mode'], [], show_progress=False)
convert_cancel.click( convert_cancel.click(
@ -178,7 +178,7 @@ def ui():
# Toggle message text in history # Toggle message text in history
show_text.change(lambda x: params.update({"show_text": x}), show_text, None) show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
show_text.change( show_text.change(
toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode']], shared.gradio['display'] toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display']
) )
show_text.change(chat.save_history, shared.gradio['mode'], [], show_progress=False) show_text.change(chat.save_history, shared.gradio['mode'], [], show_progress=False)
# Event functions to update the parameters in the backend # Event functions to update the parameters in the backend

View File

@ -57,13 +57,14 @@ def load_model():
return model return model
def remove_tts_from_history(name1, name2, mode): def remove_tts_from_history(name1, name2, mode, style):
for i, entry in enumerate(shared.history['internal']): for i, entry in enumerate(shared.history['internal']):
shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]] shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]
return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
def toggle_text_in_history(name1, name2, mode): def toggle_text_in_history(name1, name2, mode, style):
for i, entry in enumerate(shared.history['visible']): for i, entry in enumerate(shared.history['visible']):
visible_reply = entry[1] visible_reply = entry[1]
if visible_reply.startswith('<audio'): if visible_reply.startswith('<audio'):
@ -72,7 +73,8 @@ def toggle_text_in_history(name1, name2, mode):
shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"] shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"]
else: else:
shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"] shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"]
return chat_html_wrapper(shared.history['visible'], name1, name2, mode)
return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
def state_modifier(state): def state_modifier(state):
@ -167,13 +169,13 @@ def ui():
convert_arr = [convert_confirm, convert, convert_cancel] convert_arr = [convert_confirm, convert, convert_cancel]
convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr) convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
convert_confirm.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr) convert_confirm.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
convert_confirm.click(remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode']], shared.gradio['display']) convert_confirm.click(remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'])
convert_confirm.click(chat.save_history, shared.gradio['mode'], [], show_progress=False) convert_confirm.click(chat.save_history, shared.gradio['mode'], [], show_progress=False)
convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr) convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
# Toggle message text in history # Toggle message text in history
show_text.change(lambda x: params.update({"show_text": x}), show_text, None) show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
show_text.change(toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode']], shared.gradio['display']) show_text.change(toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'])
show_text.change(chat.save_history, shared.gradio['mode'], [], show_progress=False) show_text.change(chat.save_history, shared.gradio['mode'], [], show_progress=False)
# Event functions to update the parameters in the backend # Event functions to update the parameters in the backend

View File

@ -3,7 +3,7 @@
model_type: 'None' model_type: 'None'
groupsize: 'None' groupsize: 'None'
pre_layer: 0 pre_layer: 0
mode: 'cai-chat' mode: 'chat'
skip_special_tokens: true skip_special_tokens: true
custom_stopping_strings: '' custom_stopping_strings: ''
llama-[0-9]*b-4bit$: llama-[0-9]*b-4bit$:

View File

@ -251,33 +251,33 @@ def impersonate_wrapper(text, state):
def cai_chatbot_wrapper(text, state): def cai_chatbot_wrapper(text, state):
for history in chatbot_wrapper(text, state): for history in chatbot_wrapper(text, state):
yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode']) yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
def regenerate_wrapper(text, state): def regenerate_wrapper(text, state):
if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0: if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode']) yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'], state['chat_style'])
else: else:
for history in chatbot_wrapper('', state, regenerate=True): for history in chatbot_wrapper('', state, regenerate=True):
yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode']) yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
def continue_wrapper(text, state): def continue_wrapper(text, state):
if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0: if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode']) yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'], state['chat_style'])
else: else:
for history in chatbot_wrapper('', state, _continue=True): for history in chatbot_wrapper('', state, _continue=True):
yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode']) yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
def remove_last_message(name1, name2, mode): def remove_last_message(name1, name2, mode, style):
if len(shared.history['visible']) > 0 and shared.history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>': if len(shared.history['visible']) > 0 and shared.history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>':
last = shared.history['visible'].pop() last = shared.history['visible'].pop()
shared.history['internal'].pop() shared.history['internal'].pop()
else: else:
last = ['', ''] last = ['', '']
return chat_html_wrapper(shared.history['visible'], name1, name2, mode), last[0] return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style), last[0]
def send_last_reply_to_input(): def send_last_reply_to_input():
@ -287,35 +287,35 @@ def send_last_reply_to_input():
return '' return ''
def replace_last_reply(text, name1, name2, mode): def replace_last_reply(text, name1, name2, mode, style):
if len(shared.history['visible']) > 0: if len(shared.history['visible']) > 0:
shared.history['visible'][-1][1] = text shared.history['visible'][-1][1] = text
shared.history['internal'][-1][1] = apply_extensions("input", text) shared.history['internal'][-1][1] = apply_extensions("input", text)
return chat_html_wrapper(shared.history['visible'], name1, name2, mode) return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
def send_dummy_message(text, name1, name2, mode): def send_dummy_message(text, name1, name2, mode, style):
shared.history['visible'].append([text, '']) shared.history['visible'].append([text, ''])
shared.history['internal'].append([apply_extensions("input", text), '']) shared.history['internal'].append([apply_extensions("input", text), ''])
return chat_html_wrapper(shared.history['visible'], name1, name2, mode) return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
def send_dummy_reply(text, name1, name2, mode): def send_dummy_reply(text, name1, name2, mode, style):
if len(shared.history['visible']) > 0 and not shared.history['visible'][-1][1] == '': if len(shared.history['visible']) > 0 and not shared.history['visible'][-1][1] == '':
shared.history['visible'].append(['', '']) shared.history['visible'].append(['', ''])
shared.history['internal'].append(['', '']) shared.history['internal'].append(['', ''])
shared.history['visible'][-1][1] = text shared.history['visible'][-1][1] = text
shared.history['internal'][-1][1] = apply_extensions("input", text) shared.history['internal'][-1][1] = apply_extensions("input", text)
return chat_html_wrapper(shared.history['visible'], name1, name2, mode) return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
def clear_html(): def clear_html():
return chat_html_wrapper([], "", "") return chat_html_wrapper([], "", "")
def clear_chat_log(name1, name2, greeting, mode): def clear_chat_log(name1, name2, greeting, mode, style):
shared.history['visible'] = [] shared.history['visible'] = []
shared.history['internal'] = [] shared.history['internal'] = []
@ -325,14 +325,14 @@ def clear_chat_log(name1, name2, greeting, mode):
# Save cleared logs # Save cleared logs
save_history(mode) save_history(mode)
return chat_html_wrapper(shared.history['visible'], name1, name2, mode) return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
def redraw_html(name1, name2, mode): def redraw_html(name1, name2, mode, style):
return chat_html_wrapper(shared.history['visible'], name1, name2, mode) return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
def tokenize_dialogue(dialogue, name1, name2, mode): def tokenize_dialogue(dialogue, name1, name2, mode, style):
history = [] history = []
messages = [] messages = []
dialogue = re.sub('<START>', '', dialogue) dialogue = re.sub('<START>', '', dialogue)
@ -440,7 +440,7 @@ def generate_pfp_cache(character):
return None return None
def load_character(character, name1, name2, mode): def load_character(character, name1, name2, mode, style):
shared.character = character shared.character = character
context = greeting = turn_template = "" context = greeting = turn_template = ""
greeting_field = 'greeting' greeting_field = 'greeting'
@ -514,7 +514,7 @@ def load_character(character, name1, name2, mode):
# Create .json log files since they don't already exist # Create .json log files since they don't already exist
save_history(mode) save_history(mode)
return name1, name2, picture, greeting, context, repr(turn_template)[1:-1], chat_html_wrapper(shared.history['visible'], name1, name2, mode) return name1, name2, picture, greeting, context, repr(turn_template)[1:-1], chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
def upload_character(json_file, img, tavern=False): def upload_character(json_file, img, tavern=False):
@ -549,7 +549,7 @@ def upload_tavern_character(img, name1, name2):
return upload_character(json.dumps(_json), img, tavern=True) return upload_character(json.dumps(_json), img, tavern=True)
def upload_your_profile_picture(img, name1, name2, mode): def upload_your_profile_picture(img, name1, name2, mode, style):
cache_folder = Path("cache") cache_folder = Path("cache")
if not cache_folder.exists(): if not cache_folder.exists():
cache_folder.mkdir() cache_folder.mkdir()
@ -562,4 +562,4 @@ def upload_your_profile_picture(img, name1, name2, mode):
img.save(Path('cache/pfp_me.png')) img.save(Path('cache/pfp_me.png'))
logging.info('Profile picture saved to "cache/pfp_me.png"') logging.info('Profile picture saved to "cache/pfp_me.png"')
return chat_html_wrapper(shared.history['visible'], name1, name2, mode, reset_cache=True) return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style, reset_cache=True)

View File

@ -12,6 +12,8 @@ from pathlib import Path
import markdown import markdown
from PIL import Image, ImageOps from PIL import Image, ImageOps
from modules.utils import get_available_chat_styles
# This is to store the paths to the thumbnails of the profile pictures # This is to store the paths to the thumbnails of the profile pictures
image_cache = {} image_cache = {}
@ -19,13 +21,14 @@ with open(Path(__file__).resolve().parent / '../css/html_readable_style.css', 'r
readable_css = f.read() readable_css = f.read()
with open(Path(__file__).resolve().parent / '../css/html_4chan_style.css', 'r') as css_f: with open(Path(__file__).resolve().parent / '../css/html_4chan_style.css', 'r') as css_f:
_4chan_css = css_f.read() _4chan_css = css_f.read()
with open(Path(__file__).resolve().parent / '../css/html_cai_style.css', 'r') as f:
cai_css = f.read()
with open(Path(__file__).resolve().parent / '../css/html_bubble_chat_style.css', 'r') as f:
bubble_chat_css = f.read()
with open(Path(__file__).resolve().parent / '../css/html_instruct_style.css', 'r') as f: with open(Path(__file__).resolve().parent / '../css/html_instruct_style.css', 'r') as f:
instruct_css = f.read() instruct_css = f.read()
# Custom chat styles
chat_styles = {}
for k in get_available_chat_styles():
chat_styles[k] = open(Path(f'css/chat_style-{k}.css'), 'r').read()
def fix_newlines(string): def fix_newlines(string):
string = string.replace('\n', '\n\n') string = string.replace('\n', '\n\n')
@ -185,8 +188,8 @@ def generate_instruct_html(history):
return output return output
def generate_cai_chat_html(history, name1, name2, reset_cache=False): def generate_cai_chat_html(history, name1, name2, style, reset_cache=False):
output = f'<style>{cai_css}</style><div class="chat" id="chat">' output = f'<style>{chat_styles[style]}</style><div class="chat" id="chat">'
# We use ?name2 and ?time.time() to force the browser to reset caches # We use ?name2 and ?time.time() to force the browser to reset caches
img_bot = f'<img src="file/cache/pfp_character.png?{name2}">' if Path("cache/pfp_character.png").exists() else '' img_bot = f'<img src="file/cache/pfp_character.png?{name2}">' if Path("cache/pfp_character.png").exists() else ''
@ -235,7 +238,7 @@ def generate_cai_chat_html(history, name1, name2, reset_cache=False):
def generate_chat_html(history, name1, name2, reset_cache=False): def generate_chat_html(history, name1, name2, reset_cache=False):
output = f'<style>{bubble_chat_css}</style><div class="chat" id="chat">' output = f'<style>{chat_styles["wpp"]}</style><div class="chat" id="chat">'
for i, _row in enumerate(history[::-1]): for i, _row in enumerate(history[::-1]):
row = [convert_to_markdown(entry) for entry in _row] row = [convert_to_markdown(entry) for entry in _row]
@ -267,12 +270,10 @@ def generate_chat_html(history, name1, name2, reset_cache=False):
return output return output
def chat_html_wrapper(history, name1, name2, mode, reset_cache=False): def chat_html_wrapper(history, name1, name2, mode, style, reset_cache=False):
if mode == "cai-chat": if mode == 'instruct':
return generate_cai_chat_html(history, name1, name2, reset_cache)
elif mode == "chat":
return generate_chat_html(history, name1, name2)
elif mode == "instruct":
return generate_instruct_html(history) return generate_instruct_html(history)
elif style == 'wpp':
return generate_chat_html(history, name1, name2)
else: else:
return '' return generate_cai_chat_html(history, name1, name2, style, reset_cache)

View File

@ -49,7 +49,8 @@ settings = {
'truncation_length': 2048, 'truncation_length': 2048,
'truncation_length_min': 0, 'truncation_length_min': 0,
'truncation_length_max': 8192, 'truncation_length_max': 8192,
'mode': 'cai-chat', 'mode': 'chat',
'chat_style': 'cai-chat',
'instruction_template': 'None', 'instruction_template': 'None',
'chat_prompt_size': 2048, 'chat_prompt_size': 2048,
'chat_prompt_size_min': 0, 'chat_prompt_size_min': 0,
@ -95,7 +96,6 @@ parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpForma
# Basic settings # Basic settings
parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.') parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode with a style similar to the Character.AI website.') parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode with a style similar to the Character.AI website.')
parser.add_argument('--cai-chat', action='store_true', help='DEPRECATED: use --chat instead.')
parser.add_argument('--character', type=str, help='The name of the character to load in chat mode by default.') parser.add_argument('--character', type=str, help='The name of the character to load in chat mode by default.')
parser.add_argument('--model', type=str, help='Name of the model to load by default.') parser.add_argument('--model', type=str, help='Name of the model to load by default.')
parser.add_argument('--lora', type=str, nargs="+", help='The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.') parser.add_argument('--lora', type=str, nargs="+", help='The list of LoRAs to load. If you want to load more than one LoRA, write the names separated by spaces.')
@ -176,11 +176,6 @@ for k in deprecated_dict:
logging.warning(f"--{k} is deprecated and will be removed. Use --{deprecated_dict[k][0]} instead.") logging.warning(f"--{k} is deprecated and will be removed. Use --{deprecated_dict[k][0]} instead.")
setattr(args, deprecated_dict[k][0], getattr(args, k)) setattr(args, deprecated_dict[k][0], getattr(args, k))
# Deprecation warnings for parameters that have been removed
if args.cai_chat:
logging.warning("--cai-chat is deprecated. Use --chat instead.")
args.chat = True
# Security warnings # Security warnings
if args.trust_remote_code: if args.trust_remote_code:
logging.warning("trust_remote_code is enabled. This is dangerous.") logging.warning("trust_remote_code is enabled. This is dangerous.")

View File

@ -36,7 +36,7 @@ def list_model_elements():
def list_interface_input_elements(chat=False): def list_interface_input_elements(chat=False):
elements = ['max_new_tokens', 'seed', 'temperature', 'top_p', 'top_k', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'no_repeat_ngram_size', 'min_length', 'do_sample', 'penalty_alpha', 'num_beams', 'length_penalty', 'early_stopping', 'add_bos_token', 'ban_eos_token', 'truncation_length', 'custom_stopping_strings', 'skip_special_tokens', 'preset_menu', 'stream'] elements = ['max_new_tokens', 'seed', 'temperature', 'top_p', 'top_k', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'no_repeat_ngram_size', 'min_length', 'do_sample', 'penalty_alpha', 'num_beams', 'length_penalty', 'early_stopping', 'add_bos_token', 'ban_eos_token', 'truncation_length', 'custom_stopping_strings', 'skip_special_tokens', 'preset_menu', 'stream']
if chat: if chat:
elements += ['name1', 'name2', 'greeting', 'context', 'chat_prompt_size', 'chat_generation_attempts', 'stop_at_newline', 'mode', 'instruction_template', 'character_menu', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template'] elements += ['name1', 'name2', 'greeting', 'context', 'chat_prompt_size', 'chat_generation_attempts', 'stop_at_newline', 'mode', 'instruction_template', 'character_menu', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template', 'chat_style']
elements += list_model_elements() elements += list_model_elements()
return elements return elements

View File

@ -59,3 +59,7 @@ def get_available_loras():
def get_datasets(path: str, ext: str): def get_datasets(path: str, ext: str):
return ['None'] + sorted(set([k.stem for k in Path(path).glob(f'*.{ext}') if k.stem != 'put-trainer-datasets-here']), key=natural_keys) return ['None'] + sorted(set([k.stem for k in Path(path).glob(f'*.{ext}') if k.stem != 'put-trainer-datasets-here']), key=natural_keys)
def get_available_chat_styles():
return sorted(set(('-'.join(k.stem.split('-')[1:]) for k in Path('css').glob('chat_style*.css'))), key=natural_keys)

View File

@ -481,7 +481,7 @@ def create_interface():
is_instruct = shared.settings['mode'] == 'instruct' is_instruct = shared.settings['mode'] == 'instruct'
with gr.Tab('Text generation', elem_id='main'): with gr.Tab('Text generation', elem_id='main'):
shared.gradio['display'] = gr.HTML(value=chat_html_wrapper(shared.history['visible'], shared.settings['name1'], shared.settings['name2'], 'cai-chat')) shared.gradio['display'] = gr.HTML(value=chat_html_wrapper(shared.history['visible'], shared.settings['name1'], shared.settings['name2'], 'chat', 'cai-chat'))
shared.gradio['textbox'] = gr.Textbox(label='Input') shared.gradio['textbox'] = gr.Textbox(label='Input')
with gr.Row(): with gr.Row():
shared.gradio['Stop'] = gr.Button('Stop', elem_id='stop') shared.gradio['Stop'] = gr.Button('Stop', elem_id='stop')
@ -504,8 +504,12 @@ def create_interface():
shared.gradio['Clear history-confirm'] = gr.Button('Confirm', variant='stop', visible=False) shared.gradio['Clear history-confirm'] = gr.Button('Confirm', variant='stop', visible=False)
shared.gradio['Clear history-cancel'] = gr.Button('Cancel', visible=False) shared.gradio['Clear history-cancel'] = gr.Button('Cancel', visible=False)
shared.gradio['mode'] = gr.Radio(choices=['cai-chat', 'chat', 'instruct'], value=shared.settings['mode'], label='Mode') with gr.Row():
shared.gradio['instruction_template'] = gr.Dropdown(choices=utils.get_available_instruction_templates(), label='Instruction template', value='None', visible=is_instruct, info='Change this according to the model/LoRA that you are using.') with gr.Column():
shared.gradio['mode'] = gr.Radio(choices=['chat', 'instruct'], value=shared.settings['mode'] if shared.settings['mode'] in ['chat', 'instruct'] else 'chat', label='Mode')
with gr.Column():
shared.gradio['instruction_template'] = gr.Dropdown(choices=utils.get_available_instruction_templates(), label='Instruction template', value='None', visible=is_instruct, info='Change this according to the model/LoRA that you are using.')
shared.gradio['chat_style'] = gr.Dropdown(choices=utils.get_available_chat_styles(), label='Chat style', value=shared.settings['chat_style'], visible=not is_instruct)
with gr.Tab('Character', elem_id='chat-settings'): with gr.Tab('Character', elem_id='chat-settings'):
with gr.Row(): with gr.Row():
@ -654,12 +658,13 @@ def create_interface():
# Interface mode tab # Interface mode tab
with gr.Tab("Interface mode", elem_id="interface-mode"): with gr.Tab("Interface mode", elem_id="interface-mode"):
modes = ["default", "notebook", "chat", "cai_chat"] modes = ["default", "notebook", "chat"]
current_mode = "default" current_mode = "default"
for mode in modes[1:]: for mode in modes[1:]:
if getattr(shared.args, mode): if getattr(shared.args, mode):
current_mode = mode current_mode = mode
break break
cmd_list = vars(shared.args) cmd_list = vars(shared.args)
bool_list = [k for k in cmd_list if type(cmd_list[k]) is bool and k not in modes + ui.list_model_elements()] bool_list = [k for k in cmd_list if type(cmd_list[k]) is bool and k not in modes + ui.list_model_elements()]
bool_active = [k for k in bool_list if vars(shared.args)[k]] bool_active = [k for k in bool_list if vars(shared.args)[k]]
@ -679,7 +684,7 @@ def create_interface():
if shared.is_chat(): if shared.is_chat():
shared.input_params = [shared.gradio[k] for k in ['Chat input', 'interface_state']] shared.input_params = [shared.gradio[k] for k in ['Chat input', 'interface_state']]
clear_arr = [shared.gradio[k] for k in ['Clear history-confirm', 'Clear history', 'Clear history-cancel']] clear_arr = [shared.gradio[k] for k in ['Clear history-confirm', 'Clear history', 'Clear history-cancel']]
reload_inputs = [shared.gradio[k] for k in ['name1', 'name2', 'mode']] reload_inputs = [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']]
gen_events.append(shared.gradio['Generate'].click( gen_events.append(shared.gradio['Generate'].click(
ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then( ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
@ -713,23 +718,23 @@ def create_interface():
) )
shared.gradio['Replace last reply'].click( shared.gradio['Replace last reply'].click(
chat.replace_last_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode']], shared.gradio['display'], show_progress=False).then( chat.replace_last_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'], show_progress=False).then(
lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then( lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then(
chat.save_history, shared.gradio['mode'], None, show_progress=False) chat.save_history, shared.gradio['mode'], None, show_progress=False)
shared.gradio['Send dummy message'].click( shared.gradio['Send dummy message'].click(
chat.send_dummy_message, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode']], shared.gradio['display'], show_progress=False).then( chat.send_dummy_message, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'], show_progress=False).then(
lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then( lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then(
chat.save_history, shared.gradio['mode'], None, show_progress=False) chat.save_history, shared.gradio['mode'], None, show_progress=False)
shared.gradio['Send dummy reply'].click( shared.gradio['Send dummy reply'].click(
chat.send_dummy_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode']], shared.gradio['display'], show_progress=False).then( chat.send_dummy_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'], show_progress=False).then(
lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then( lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then(
chat.save_history, shared.gradio['mode'], None, show_progress=False) chat.save_history, shared.gradio['mode'], None, show_progress=False)
shared.gradio['Clear history-confirm'].click( shared.gradio['Clear history-confirm'].click(
lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr).then( lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr).then(
chat.clear_chat_log, [shared.gradio[k] for k in ['name1', 'name2', 'greeting', 'mode']], shared.gradio['display']).then( chat.clear_chat_log, [shared.gradio[k] for k in ['name1', 'name2', 'greeting', 'mode', 'chat_style']], shared.gradio['display']).then(
chat.save_history, shared.gradio['mode'], None, show_progress=False) chat.save_history, shared.gradio['mode'], None, show_progress=False)
shared.gradio['Stop'].click( shared.gradio['Stop'].click(
@ -737,12 +742,13 @@ def create_interface():
chat.redraw_html, reload_inputs, shared.gradio['display']) chat.redraw_html, reload_inputs, shared.gradio['display'])
shared.gradio['mode'].change( shared.gradio['mode'].change(
lambda x: [gr.update(visible=x == 'instruct')] * 5 + [gr.update(visible=x != 'instruct')] * 4, shared.gradio['mode'], [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template', 'name1', 'name2', 'context', 'greeting']]).then( lambda x: [gr.update(visible=x == 'instruct')] * 5 + [gr.update(visible=x != 'instruct')] * 5, shared.gradio['mode'], [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template', 'name1', 'name2', 'context', 'greeting', 'chat_style']], show_progress=False).then(
lambda x: gr.update(interactive=x != 'instruct'), shared.gradio['mode'], shared.gradio['character_menu']).then( lambda x: gr.update(interactive=x != 'instruct'), shared.gradio['mode'], shared.gradio['character_menu']).then(
chat.redraw_html, reload_inputs, shared.gradio['display']) chat.redraw_html, reload_inputs, shared.gradio['display'])
shared.gradio['chat_style'].change(chat.redraw_html, reload_inputs, shared.gradio['display'])
shared.gradio['instruction_template'].change( shared.gradio['instruction_template'].change(
chat.load_character, [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'mode']], [shared.gradio[k] for k in ['name1_instruct', 'name2_instruct', 'dummy', 'dummy', 'context_instruct', 'turn_template', 'display']]).then( chat.load_character, [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'mode', 'chat_style']], [shared.gradio[k] for k in ['name1_instruct', 'name2_instruct', 'dummy', 'dummy', 'context_instruct', 'turn_template', 'display']]).then(
chat.redraw_html, reload_inputs, shared.gradio['display']) chat.redraw_html, reload_inputs, shared.gradio['display'])
shared.gradio['upload_chat_history'].upload( shared.gradio['upload_chat_history'].upload(
@ -752,12 +758,12 @@ def create_interface():
shared.gradio['Copy last reply'].click(chat.send_last_reply_to_input, None, shared.gradio['textbox'], show_progress=False) shared.gradio['Copy last reply'].click(chat.send_last_reply_to_input, None, shared.gradio['textbox'], show_progress=False)
shared.gradio['Clear history'].click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, clear_arr) shared.gradio['Clear history'].click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, clear_arr)
shared.gradio['Clear history-cancel'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr) shared.gradio['Clear history-cancel'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr)
shared.gradio['Remove last'].click(chat.remove_last_message, [shared.gradio[k] for k in ['name1', 'name2', 'mode']], [shared.gradio['display'], shared.gradio['textbox']], show_progress=False) shared.gradio['Remove last'].click(chat.remove_last_message, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], [shared.gradio['display'], shared.gradio['textbox']], show_progress=False)
shared.gradio['download_button'].click(lambda x: chat.save_history(x, timestamp=True), shared.gradio['mode'], shared.gradio['download']) shared.gradio['download_button'].click(lambda x: chat.save_history(x, timestamp=True), shared.gradio['mode'], shared.gradio['download'])
shared.gradio['Upload character'].click(chat.upload_character, [shared.gradio['upload_json'], shared.gradio['upload_img_bot']], [shared.gradio['character_menu']]) shared.gradio['Upload character'].click(chat.upload_character, [shared.gradio['upload_json'], shared.gradio['upload_img_bot']], [shared.gradio['character_menu']])
shared.gradio['character_menu'].change(chat.load_character, [shared.gradio[k] for k in ['character_menu', 'name1', 'name2', 'mode']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'turn_template', 'display']]) shared.gradio['character_menu'].change(chat.load_character, [shared.gradio[k] for k in ['character_menu', 'name1', 'name2', 'mode', 'chat_style']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'turn_template', 'display']])
shared.gradio['upload_img_tavern'].upload(chat.upload_tavern_character, [shared.gradio['upload_img_tavern'], shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['character_menu']]) shared.gradio['upload_img_tavern'].upload(chat.upload_tavern_character, [shared.gradio['upload_img_tavern'], shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['character_menu']])
shared.gradio['your_picture'].change(chat.upload_your_profile_picture, [shared.gradio[k] for k in ['your_picture', 'name1', 'name2', 'mode']], shared.gradio['display']) shared.gradio['your_picture'].change(chat.upload_your_profile_picture, [shared.gradio[k] for k in ['your_picture', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'])
shared.gradio['interface'].load(None, None, None, _js=f"() => {{{ui.main_js+ui.chat_js}}}") shared.gradio['interface'].load(None, None, None, _js=f"() => {{{ui.main_js+ui.chat_js}}}")
# notebook/default modes event handlers # notebook/default modes event handlers

View File

@ -17,7 +17,8 @@
"truncation_length": 2048, "truncation_length": 2048,
"truncation_length_min": 0, "truncation_length_min": 0,
"truncation_length_max": 8192, "truncation_length_max": 8192,
"mode": "cai-chat", "mode": "chat",
"chat_style": "cai-chat",
"instruction_template": "None", "instruction_template": "None",
"chat_prompt_size": 2048, "chat_prompt_size": 2048,
"chat_prompt_size_min": 0, "chat_prompt_size_min": 0,