Mirror of https://github.com/oobabooga/text-generation-webui.git
Commit 3b886f9c9f: Add chat-instruct mode (#2049)
Parent: 5f6cf39f36
modules/chat.py (126 lines changed)
```diff
@@ -19,25 +19,8 @@ from modules.text_generation import (generate_reply, get_encoded_length,
 from modules.utils import replace_all
 
 
-def generate_chat_prompt(user_input, state, **kwargs):
-    impersonate = kwargs.get('impersonate', False)
-    _continue = kwargs.get('_continue', False)
-    also_return_rows = kwargs.get('also_return_rows', False)
-
-    history = state.get('history', shared.history['internal'])
-    is_instruct = state['mode'] == 'instruct'
-    rows = [state['context_instruct'] if is_instruct else f"{state['context'].strip()}\n"]
-    min_rows = 3
-
-    # Finding the maximum prompt size
-    chat_prompt_size = state['chat_prompt_size']
-    if shared.soft_prompt:
-        chat_prompt_size -= shared.soft_prompt_tensor.shape[1]
-
-    max_length = min(get_max_prompt_length(state), chat_prompt_size)
-
-    # Building the turn templates
-    if is_instruct:
+def get_turn_substrings(state, instruct=False):
+    if instruct:
         if 'turn_template' not in state or state['turn_template'] == '':
             template = '<|user|>\n<|user-message|>\n<|bot|>\n<|bot-message|>\n'
         else:
```
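The hunk above extracts the turn-template handling into a new `get_turn_substrings()` helper. A standalone sketch of what it produces for the default instruct template, with `replace_all` inlined and a hypothetical `state` dict standing in for the real `modules.utils.replace_all` and UI state:

```python
# Standalone sketch (not from the commit) of get_turn_substrings() output.
def replace_all(text, replacements):
    for k, v in replacements.items():
        text = text.replace(k, v)
    return text

state = {'name1_instruct': 'USER', 'name2_instruct': 'ASSISTANT'}  # hypothetical
template = '<|user|>\n<|user-message|>\n<|bot|>\n<|bot-message|>\n'
replacements = {
    '<|user|>': state['name1_instruct'].strip(),
    '<|bot|>': state['name2_instruct'].strip(),
}

# Same four substrings the new code builds, then name substitution in one pass.
output = {
    'user_turn': template.split('<|bot|>')[0],
    'bot_turn': '<|bot|>' + template.split('<|bot|>')[1],
    'user_turn_stripped': template.split('<|bot|>')[0].split('<|user-message|>')[0],
    'bot_turn_stripped': '<|bot|>' + template.split('<|bot|>')[1].split('<|bot-message|>')[0],
}
for k in output:
    output[k] = replace_all(output[k], replacements)

print(output)
# {'user_turn': 'USER\n<|user-message|>\n', 'bot_turn': 'ASSISTANT\n<|bot-message|>\n',
#  'user_turn_stripped': 'USER\n', 'bot_turn_stripped': 'ASSISTANT\n'}
```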
```diff
@@ -46,44 +29,92 @@ def generate_chat_prompt(user_input, state, **kwargs):
         template = '<|user|>: <|user-message|>\n<|bot|>: <|bot-message|>\n'
 
     replacements = {
-        '<|user|>': state['name1_instruct' if is_instruct else 'name1'].strip(),
-        '<|bot|>': state['name2_instruct' if is_instruct else 'name2'].strip(),
+        '<|user|>': state['name1_instruct' if instruct else 'name1'].strip(),
+        '<|bot|>': state['name2_instruct' if instruct else 'name2'].strip(),
     }
 
-    user_turn = replace_all(template.split('<|bot|>')[0], replacements)
-    bot_turn = replace_all('<|bot|>' + template.split('<|bot|>')[1], replacements)
-    user_turn_stripped = replace_all(user_turn.split('<|user-message|>')[0], replacements)
-    bot_turn_stripped = replace_all(bot_turn.split('<|bot-message|>')[0], replacements)
+    output = {
+        'user_turn': template.split('<|bot|>')[0],
+        'bot_turn': '<|bot|>' + template.split('<|bot|>')[1],
+        'user_turn_stripped': template.split('<|bot|>')[0].split('<|user-message|>')[0],
+        'bot_turn_stripped': '<|bot|>' + template.split('<|bot|>')[1].split('<|bot-message|>')[0],
+    }
+
+    for k in output:
+        output[k] = replace_all(output[k], replacements)
+
+    return output
+
+
+def generate_chat_prompt(user_input, state, **kwargs):
+    impersonate = kwargs.get('impersonate', False)
+    _continue = kwargs.get('_continue', False)
+    also_return_rows = kwargs.get('also_return_rows', False)
+    history = state.get('history', shared.history['internal'])
+    is_instruct = state['mode'] == 'instruct'
+
+    # Finding the maximum prompt size
+    chat_prompt_size = state['chat_prompt_size']
+    if shared.soft_prompt:
+        chat_prompt_size -= shared.soft_prompt_tensor.shape[1]
+
+    max_length = min(get_max_prompt_length(state), chat_prompt_size)
+
+    all_substrings = {
+        'chat': get_turn_substrings(state, instruct=False),
+        'instruct': get_turn_substrings(state, instruct=True)
+    }
+    substrings = all_substrings['instruct' if is_instruct else 'chat']
+
+    # Creating the template for "chat-instruct" mode
+    if state['mode'] == 'chat-instruct':
+        wrapper = ''
+        command = state['chat-instruct_command'].replace('<|character|>', state['name2'] if not impersonate else state['name1'])
+        wrapper += state['context_instruct']
+        wrapper += all_substrings['instruct']['user_turn'].replace('<|user-message|>', command)
+        wrapper += all_substrings['instruct']['bot_turn_stripped']
+        if impersonate:
+            wrapper += substrings['user_turn_stripped'].rstrip(' ')
+        else:
+            wrapper += apply_extensions("bot_prefix", substrings['bot_turn_stripped'].rstrip(' '))
+    else:
+        wrapper = '<|prompt|>'
 
     # Building the prompt
+    min_rows = 3
     i = len(history) - 1
-    while i >= 0 and get_encoded_length(''.join(rows)) < max_length:
+    rows = [state['context_instruct'] if is_instruct else f"{state['context'].strip()}\n"]
+    while i >= 0 and get_encoded_length(wrapper.replace('<|prompt|>', ''.join(rows))) < max_length:
         if _continue and i == len(history) - 1:
-            rows.insert(1, bot_turn_stripped + history[i][1].strip())
+            rows.insert(1, substrings['bot_turn_stripped'] + history[i][1].strip())
         else:
-            rows.insert(1, bot_turn.replace('<|bot-message|>', history[i][1].strip()))
+            rows.insert(1, substrings['bot_turn'].replace('<|bot-message|>', history[i][1].strip()))
 
         string = history[i][0]
         if string not in ['', '<|BEGIN-VISIBLE-CHAT|>']:
-            rows.insert(1, replace_all(user_turn, {'<|user-message|>': string.strip(), '<|round|>': str(i)}))
+            rows.insert(1, replace_all(substrings['user_turn'], {'<|user-message|>': string.strip(), '<|round|>': str(i)}))
 
         i -= 1
 
     if impersonate:
-        min_rows = 2
-        rows.append(user_turn_stripped.rstrip(' '))
+        if state['mode'] == 'chat-instruct':
+            min_rows = 1
+        else:
+            min_rows = 2
+            rows.append(substrings['user_turn_stripped'].rstrip(' '))
     elif not _continue:
         # Adding the user message
         if len(user_input) > 0:
-            rows.append(replace_all(user_turn, {'<|user-message|>': user_input.strip(), '<|round|>': str(len(history))}))
+            rows.append(replace_all(substrings['user_turn'], {'<|user-message|>': user_input.strip(), '<|round|>': str(len(history))}))
 
         # Adding the Character prefix
-        rows.append(apply_extensions("bot_prefix", bot_turn_stripped.rstrip(' ')))
+        if state['mode'] != 'chat-instruct':
+            rows.append(apply_extensions("bot_prefix", substrings['bot_turn_stripped'].rstrip(' ')))
 
-    while len(rows) > min_rows and get_encoded_length(''.join(rows)) >= max_length:
+    while len(rows) > min_rows and get_encoded_length(wrapper.replace('<|prompt|>', ''.join(rows))) >= max_length:
         rows.pop(1)
 
-    prompt = ''.join(rows)
+    prompt = wrapper.replace('<|prompt|>', ''.join(rows))
     if also_return_rows:
         return prompt, rows
     else:
```
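The new `wrapper` is the core of chat-instruct mode: the regular chat transcript is rendered as before, then substituted into an instruction prompt at the `<|prompt|>` placeholder, and the token budget (`get_encoded_length(...)`) is measured against the wrapped prompt rather than the bare transcript. A minimal sketch of the composition, with invented template strings (the real ones come from the character and instruction-template files):

```python
# Illustrative sketch of the chat-instruct wrapper; all values are hypothetical.
context_instruct = 'Below is an instruction. Write a response that completes the request.\n\n'
user_turn = '### Instruction:\n<|user-message|>\n\n'   # substrings['user_turn'] for an Alpaca-style template
bot_turn_stripped = '### Response:\n'                  # substrings['bot_turn_stripped']
command = 'Continue the chat dialogue below. Write a single reply for the character "Bot".\n\n<|prompt|>'

# Build the wrapper exactly as the diff does: instruction context, then the
# command as the user turn, then the opening of the bot turn.
wrapper = context_instruct
wrapper += user_turn.replace('<|user-message|>', command)
wrapper += bot_turn_stripped

# The regular chat transcript is injected at <|prompt|> at the very end.
chat_transcript = 'You: Hi!\nBot: Hello there.\nYou: How are you?\nBot:'
print(wrapper.replace('<|prompt|>', chat_transcript))
```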
```diff
@@ -91,8 +122,9 @@ def generate_chat_prompt(user_input, state, **kwargs):
 
 
 def get_stopping_strings(state):
-    if state['mode'] == 'instruct':
-        stopping_strings = [
+    stopping_strings = []
+    if state['mode'] in ['instruct', 'chat-instruct']:
+        stopping_strings += [
             state['turn_template'].split('<|user-message|>')[1].split('<|bot|>')[0] + '<|bot|>',
             state['turn_template'].split('<|bot-message|>')[1] + '<|user|>'
         ]
@@ -104,8 +136,12 @@ def get_stopping_strings(state):
 
         for i in range(len(stopping_strings)):
             stopping_strings[i] = replace_all(stopping_strings[i], replacements).rstrip(' ').replace(r'\n', '\n')
-    else:
-        stopping_strings = [f"\n{state['name1']}:", f"\n{state['name2']}:"]
+
+    if state['mode'] in ['chat', 'chat-instruct']:
+        stopping_strings += [
+            f"\n{state['name1']}:",
+            f"\n{state['name2']}:"
+        ]
 
     stopping_strings += ast.literal_eval(f"[{state['custom_stopping_strings']}]")
     return stopping_strings
```
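Because stopping strings are now accumulated instead of chosen either/or, chat-instruct mode inherits both the instruct-template stops and the chat-name stops. A self-contained sketch with hypothetical `state` values and `replace_all` inlined:

```python
# Sketch of the new accumulation logic; state values are invented for the demo.
def replace_all(text, replacements):
    for k, v in replacements.items():
        text = text.replace(k, v)
    return text

state = {
    'mode': 'chat-instruct',
    'name1': 'You', 'name2': 'Bot',
    'name1_instruct': 'USER', 'name2_instruct': 'ASSISTANT',
    # Turn templates store a literal backslash-n, hence .replace(r'\n', '\n') below.
    'turn_template': r'<|user|> <|user-message|>\n<|bot|> <|bot-message|>\n',
}
replacements = {'<|user|>': state['name1_instruct'], '<|bot|>': state['name2_instruct']}

stopping_strings = []
if state['mode'] in ['instruct', 'chat-instruct']:
    stopping_strings += [
        state['turn_template'].split('<|user-message|>')[1].split('<|bot|>')[0] + '<|bot|>',
        state['turn_template'].split('<|bot-message|>')[1] + '<|user|>',
    ]
    for i in range(len(stopping_strings)):
        stopping_strings[i] = replace_all(stopping_strings[i], replacements).rstrip(' ').replace(r'\n', '\n')

if state['mode'] in ['chat', 'chat-instruct']:
    stopping_strings += [f"\n{state['name1']}:", f"\n{state['name2']}:"]

print(stopping_strings)
# ['\nASSISTANT', '\nUSER', '\nYou:', '\nBot:']
```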
```diff
@@ -433,7 +469,7 @@ def generate_pfp_cache(character):
     return None
 
 
-def load_character(character, name1, name2, mode):
+def load_character(character, name1, name2, instruct=False):
     shared.character = character
     context = greeting = turn_template = ""
     greeting_field = 'greeting'
@@ -444,7 +480,7 @@ def load_character(character, name1, name2, mode):
         Path("cache/pfp_character.png").unlink()
 
     if character != 'None':
-        folder = 'characters' if not mode == 'instruct' else 'characters/instruction-following'
+        folder = 'characters' if not instruct else 'characters/instruction-following'
         picture = generate_pfp_cache(character)
         for extension in ["yml", "yaml", "json"]:
             filepath = Path(f'{folder}/{character}.{extension}')
@@ -472,8 +508,8 @@ def load_character(character, name1, name2, mode):
 
         if 'context' in data:
             context = data['context']
-            if mode != 'instruct':
-                context = context.strip() + '\n\n'
+            if not instruct:
+                context = context.strip() + '\n'
         elif "char_persona" in data:
             context = build_pygmalion_style_context(data)
             greeting_field = 'char_greeting'
@@ -493,7 +529,7 @@ def load_character(character, name1, name2, mode):
             greeting = shared.settings['greeting']
         turn_template = shared.settings['turn_template']
 
-    if mode != 'instruct':
+    if not instruct:
         shared.history['internal'] = []
         shared.history['visible'] = []
         if Path(f'logs/{shared.character}_persistent.json').exists():
@@ -505,7 +541,7 @@ def load_character(character, name1, name2, mode):
             shared.history['visible'] += [['', apply_extensions("output", greeting)]]
 
         # Create .json log files since they don't already exist
-        save_history(mode)
+        save_history('instruct' if instruct else 'chat')
 
     return name1, name2, picture, greeting, context, repr(turn_template)[1:-1]
 
```
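`load_character()` now takes a boolean instead of the UI mode string, and the server.py hunks further down bind it with `functools.partial`. A small sketch of the new calling convention (the alias names here are invented):

```python
from functools import partial
from modules import chat

# Hypothetical wiring mirroring the server.py changes below: one function
# serves both the character tab and the instruction-template tab, with the
# flag bound ahead of time instead of threading 'mode' through the UI.
load_instruction_template = partial(chat.load_character, instruct=True)
load_chat_character = partial(chat.load_character, instruct=False)
```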
modules/shared.py

```diff
@@ -54,6 +54,7 @@ settings = {
     'mode': 'chat',
     'chat_style': 'cai-chat',
     'instruction_template': 'None',
+    'chat-instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>',
     'chat_prompt_size': 2048,
     'chat_prompt_size_min': 0,
     'chat_prompt_size_max': 2048,
```
modules/ui.py

```diff
@@ -36,7 +36,7 @@ def list_model_elements():
 def list_interface_input_elements(chat=False):
     elements = ['max_new_tokens', 'seed', 'temperature', 'top_p', 'top_k', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'no_repeat_ngram_size', 'min_length', 'do_sample', 'penalty_alpha', 'num_beams', 'length_penalty', 'early_stopping', 'add_bos_token', 'ban_eos_token', 'truncation_length', 'custom_stopping_strings', 'skip_special_tokens', 'preset_menu', 'stream']
     if chat:
-        elements += ['name1', 'name2', 'greeting', 'context', 'chat_prompt_size', 'chat_generation_attempts', 'stop_at_newline', 'mode', 'instruction_template', 'character_menu', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template', 'chat_style']
+        elements += ['name1', 'name2', 'greeting', 'context', 'chat_prompt_size', 'chat_generation_attempts', 'stop_at_newline', 'mode', 'instruction_template', 'character_menu', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template', 'chat_style', 'chat-instruct_command']
 
     elements += list_model_elements()
     return elements
@@ -59,13 +59,7 @@ def apply_interface_values(state, use_persistent=False):
     if len(state) == 0:
         return [gr.update() for k in elements]  # Dummy, do nothing
     else:
-        if use_persistent and 'mode' in state:
-            if state['mode'] == 'instruct':
-                return [state[k] if (k not in ['character_menu'] and k in state) else gr.update() for k in elements]
-            else:
-                return [state[k] if (k not in ['instruction_template'] and k in state) else gr.update() for k in elements]
-        else:
-            return [state[k] if k in state else gr.update() for k in elements]
+        return [state[k] if k in state else gr.update() for k in elements]
 
 
 class ToolButton(gr.Button, gr.components.FormComponent):
```
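`apply_interface_values()` no longer special-cases instruct mode when restoring persistent state, since the chat and instruct fields now live in separate tabs instead of sharing visibility-toggled components. The surviving mapping, as an isolated sketch with a hypothetical persisted state:

```python
import gradio as gr

# Sketch of the one remaining branch: take the persisted value when present,
# otherwise emit a no-op update that leaves the component unchanged.
state = {'mode': 'chat', 'name1': 'You'}            # hypothetical persisted state
elements = ['mode', 'name1', 'instruction_template']
updates = [state[k] if k in state else gr.update() for k in elements]
```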
server.py (62 lines changed)
```diff
@@ -514,7 +514,6 @@ def create_interface():
     shared.gradio['interface_state'] = gr.State({k: None for k in shared.input_elements})
     shared.gradio['Chat input'] = gr.State()
     shared.gradio['dummy'] = gr.State()
-    is_instruct = shared.settings['mode'] == 'instruct'
 
     with gr.Tab('Text generation', elem_id='main'):
         shared.gradio['display'] = gr.HTML(value=chat_html_wrapper(shared.history['visible'], shared.settings['name1'], shared.settings['name2'], 'chat', 'cai-chat'))
```
```diff
@@ -540,32 +539,37 @@ def create_interface():
                 shared.gradio['Clear history-confirm'] = gr.Button('Confirm', variant='stop', visible=False)
                 shared.gradio['Clear history-cancel'] = gr.Button('Cancel', visible=False)
 
-            with gr.Row():
-                with gr.Column():
-                    shared.gradio['mode'] = gr.Radio(choices=['chat', 'instruct'], value=shared.settings['mode'] if shared.settings['mode'] in ['chat', 'instruct'] else 'chat', label='Mode')
-                with gr.Column():
-                    shared.gradio['instruction_template'] = gr.Dropdown(choices=utils.get_available_instruction_templates(), label='Instruction template', value='None', visible=is_instruct, info='Change this according to the model/LoRA that you are using.')
-                    shared.gradio['chat_style'] = gr.Dropdown(choices=utils.get_available_chat_styles(), label='Chat style', value=shared.settings['chat_style'], visible=not is_instruct)
+            shared.gradio['mode'] = gr.Radio(choices=['chat', 'chat-instruct', 'instruct'], value=shared.settings['mode'] if shared.settings['mode'] in ['chat', 'instruct', 'chat-instruct'] else 'chat', label='Mode', info='Select the appropriate instruction template under Chat settings > Instruction template when in "instruct" or "chat-instruct" mode.')
+            shared.gradio['chat_style'] = gr.Dropdown(choices=utils.get_available_chat_styles(), label='Chat style', value=shared.settings['chat_style'], visible=shared.settings['mode'] != 'instruct')
 
-        with gr.Tab('Character', elem_id='chat-settings'):
-            with gr.Row():
-                with gr.Column(scale=8):
-                    shared.gradio['name1'] = gr.Textbox(value=shared.settings['name1'], lines=1, label='Your name', visible=not is_instruct)
-                    shared.gradio['name1_instruct'] = gr.Textbox(value=shared.settings['name1'], lines=1, label='Your name', visible=is_instruct)
-                    shared.gradio['name2'] = gr.Textbox(value=shared.settings['name2'], lines=1, label='Character\'s name', visible=not is_instruct)
-                    shared.gradio['name2_instruct'] = gr.Textbox(value=shared.settings['name2'], lines=1, label='Character\'s name', visible=is_instruct)
-                    shared.gradio['greeting'] = gr.Textbox(value=shared.settings['greeting'], lines=4, label='Greeting', visible=not is_instruct)
-                    shared.gradio['context'] = gr.Textbox(value=shared.settings['context'], lines=4, label='Context', visible=not is_instruct)
-                    shared.gradio['context_instruct'] = gr.Textbox(value=shared.settings['context'], lines=4, label='Context', visible=is_instruct)
-                    shared.gradio['turn_template'] = gr.Textbox(value=shared.settings['turn_template'], lines=1, label='Turn template', info='Used to precisely define the placement of spaces and new line characters in instruction prompts.', visible=is_instruct)
+        with gr.Tab('Chat settings', elem_id='chat-settings'):
+            with gr.Tab('Character'):
+                gr.Markdown('Used in chat and chat-instruct modes.')
+                with gr.Row():
+                    shared.gradio['character_menu'] = gr.Dropdown(choices=utils.get_available_characters(), label='Character', elem_id='character-menu')
+                    ui.create_refresh_button(shared.gradio['character_menu'], lambda: None, lambda: {'choices': utils.get_available_characters()}, 'refresh-button')
 
-                with gr.Column(scale=1):
-                    shared.gradio['character_picture'] = gr.Image(label='Character picture', type='pil')
-                    shared.gradio['your_picture'] = gr.Image(label='Your picture', type='pil', value=Image.open(Path('cache/pfp_me.png')) if Path('cache/pfp_me.png').exists() else None)
+                with gr.Row():
+                    with gr.Column(scale=8):
+                        shared.gradio['name1'] = gr.Textbox(value=shared.settings['name1'], lines=1, label='Your name')
+                        shared.gradio['name2'] = gr.Textbox(value=shared.settings['name2'], lines=1, label='Character\'s name')
+                        shared.gradio['context'] = gr.Textbox(value=shared.settings['context'], lines=4, label='Context')
+                        shared.gradio['greeting'] = gr.Textbox(value=shared.settings['greeting'], lines=4, label='Greeting')
 
-            with gr.Row():
-                shared.gradio['character_menu'] = gr.Dropdown(choices=utils.get_available_characters(), label='Character', elem_id='character-menu', interactive=not is_instruct)
-                ui.create_refresh_button(shared.gradio['character_menu'], lambda: None, lambda: {'choices': utils.get_available_characters()}, 'refresh-button')
+                    with gr.Column(scale=1):
+                        shared.gradio['character_picture'] = gr.Image(label='Character picture', type='pil')
+                        shared.gradio['your_picture'] = gr.Image(label='Your picture', type='pil', value=Image.open(Path('cache/pfp_me.png')) if Path('cache/pfp_me.png').exists() else None)
 
+            with gr.Tab('Instruction template'):
+                gr.Markdown('Used in instruct and chat-instruct modes.')
+                shared.gradio['instruction_template'] = gr.Dropdown(choices=utils.get_available_instruction_templates(), label='Instruction template', value='None', info='Change this according to the model/LoRA that you are using.')
+                shared.gradio['name1_instruct'] = gr.Textbox(value='', lines=2, label='Your name')
+                shared.gradio['name2_instruct'] = gr.Textbox(value='', lines=1, label='Character\'s name')
+                shared.gradio['context_instruct'] = gr.Textbox(value='', lines=4, label='Context')
+                shared.gradio['turn_template'] = gr.Textbox(value='', lines=1, label='Turn template', info='Used to precisely define the placement of spaces and new line characters in instruction prompts.')
+
+                with gr.Row():
+                    shared.gradio['chat-instruct_command'] = gr.Textbox(value=shared.settings['chat-instruct_command'], lines=4, label='Command for chat-instruct mode', info='<|character|> gets replaced by the bot name, and <|prompt|> gets replaced by the regular chat prompt.')
+
         with gr.Row():
             with gr.Tab('Chat history'):
```
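The old "Character" tab becomes a "Chat settings" tab with nested sub-tabs, so the instruct fields no longer need `visible=is_instruct` toggling. A stripped-down skeleton of the new layout (a sketch only, without the `shared.gradio` bookkeeping or the full set of fields):

```python
import gradio as gr

with gr.Blocks() as demo:
    with gr.Tab('Chat settings'):
        with gr.Tab('Character'):
            # Fields used in chat and chat-instruct modes.
            gr.Markdown('Used in chat and chat-instruct modes.')
            name1 = gr.Textbox(label='Your name')
        with gr.Tab('Instruction template'):
            # Fields used in instruct and chat-instruct modes.
            gr.Markdown('Used in instruct and chat-instruct modes.')
            turn_template = gr.Textbox(label='Turn template')
```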
```diff
@@ -706,7 +710,7 @@ def create_interface():
             bool_active = [k for k in bool_list if vars(shared.args)[k]]
 
             gr.Markdown("*Experimental*")
-            shared.gradio['interface_modes_menu'] = gr.Dropdown(choices=modes, value=current_mode, label="Mode")
+            shared.gradio['interface_modes_menu'] = gr.Dropdown(choices=modes, value=current_mode, label="Mode", info='For instruct and chat-instruct modes, make sure to select a template that matches the current model in the "Chat settings" tab.')
             shared.gradio['extensions_menu'] = gr.CheckboxGroup(choices=utils.get_available_extensions(), value=shared.args.extensions, label="Available extensions")
             shared.gradio['bool_menu'] = gr.CheckboxGroup(choices=bool_list, value=bool_active, label="Boolean command-line flags")
             shared.gradio['reset_interface'] = gr.Button("Apply and restart the interface")
```
```diff
@@ -783,13 +787,13 @@ def create_interface():
         chat.redraw_html, shared.reload_inputs, shared.gradio['display'])
 
     shared.gradio['mode'].change(
-        lambda x: [gr.update(visible=x == 'instruct')] * 5 + [gr.update(visible=x != 'instruct')] * 5, shared.gradio['mode'], [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'context_instruct', 'turn_template', 'name1', 'name2', 'context', 'greeting', 'chat_style']], show_progress=False).then(
-        lambda x: gr.update(interactive=x != 'instruct'), shared.gradio['mode'], shared.gradio['character_menu']).then(
+        lambda x: gr.update(visible=x != 'instruct'), shared.gradio['mode'], shared.gradio['chat_style'], show_progress=False).then(
         chat.redraw_html, shared.reload_inputs, shared.gradio['display'])
 
+
     shared.gradio['chat_style'].change(chat.redraw_html, shared.reload_inputs, shared.gradio['display'])
     shared.gradio['instruction_template'].change(
-        chat.load_character, [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'mode']], [shared.gradio[k] for k in ['name1_instruct', 'name2_instruct', 'dummy', 'dummy', 'context_instruct', 'turn_template']])
+        partial(chat.load_character, instruct=True), [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct']], [shared.gradio[k] for k in ['name1_instruct', 'name2_instruct', 'dummy', 'dummy', 'context_instruct', 'turn_template']])
 
     shared.gradio['upload_chat_history'].upload(
         chat.load_history, [shared.gradio[k] for k in ['upload_chat_history', 'name1', 'name2']], None).then(
```
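With the instruct fields on their own tab, the `mode.change` handler shrinks from toggling ten components to toggling only the chat-style dropdown. The same pattern in isolation (a hypothetical minimal repro, not code from the commit):

```python
import gradio as gr

with gr.Blocks() as demo:
    mode = gr.Radio(choices=['chat', 'chat-instruct', 'instruct'], value='chat', label='Mode')
    chat_style = gr.Dropdown(choices=['cai-chat'], value='cai-chat', label='Chat style')
    # Only chat_style reacts to mode changes now; it is hidden in instruct mode.
    mode.change(lambda x: gr.update(visible=x != 'instruct'), mode, chat_style, show_progress=False)
```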
```diff
@@ -806,7 +810,7 @@ def create_interface():
     shared.gradio['download_button'].click(lambda x: chat.save_history(x, timestamp=True), shared.gradio['mode'], shared.gradio['download'])
     shared.gradio['Upload character'].click(chat.upload_character, [shared.gradio['upload_json'], shared.gradio['upload_img_bot']], [shared.gradio['character_menu']])
     shared.gradio['character_menu'].change(
-        chat.load_character, [shared.gradio[k] for k in ['character_menu', 'name1', 'name2', 'mode']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'dummy']]).then(
+        partial(chat.load_character, instruct=False), [shared.gradio[k] for k in ['character_menu', 'name1', 'name2']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'dummy']]).then(
         chat.redraw_html, shared.reload_inputs, shared.gradio['display'])
 
     shared.gradio['upload_img_tavern'].upload(chat.upload_tavern_character, [shared.gradio['upload_img_tavern'], shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['character_menu']])
```
settings-template.json

```diff
@@ -21,6 +21,7 @@
     "mode": "chat",
     "chat_style": "cai-chat",
     "instruction_template": "None",
+    "chat-instruct_command": "Continue the chat dialogue below. Write a single reply for the character \"<|character|>\".\n\n<|prompt|>",
     "chat_prompt_size": 2048,
     "chat_prompt_size_min": 0,
     "chat_prompt_size_max": 2048,
```