Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-11-22 16:17:57 +01:00)

Refactor chat functions (#2003)

Parent: 4e9da22c58
Commit: 638c6a65a2

@@ -35,18 +35,15 @@ class Handler(BaseHTTPRequestHandler):
             generate_params['stream'] = False
 
             generator = generate_reply(
-                prompt, generate_params, stopping_strings=stopping_strings)
+                prompt, generate_params, stopping_strings=stopping_strings, is_chat=False)
 
             answer = ''
             for a in generator:
-                if isinstance(a, str):
-                    answer = a
-                else:
-                    answer = a[0]
+                answer = a
 
             response = json.dumps({
                 'results': [{
-                    'text': answer if shared.is_chat() else answer[len(prompt):]
+                    'text': answer[len(prompt):]
                 }]
             })
             self.wfile.write(response.encode('utf-8'))
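The hunk above shows the blocking API now calling generate_reply() with is_chat=False and slicing the prompt off the final string itself. A minimal, self-contained sketch of that calling pattern (the generate_reply stub below is hypothetical; in the web UI it comes from modules.text_generation):

# Hypothetical stub standing in for modules.text_generation.generate_reply().
# After this commit it always yields plain strings and takes an is_chat flag,
# so non-chat callers pass is_chat=False and strip the prompt themselves.
def generate_reply(prompt, generate_params, stopping_strings=None, is_chat=False):
    yield prompt + " Paris"
    yield prompt + " Paris is the capital of France."

prompt = "Q: What is the capital of France?\nA:"
answer = ''
for a in generate_reply(prompt, {'stream': False}, stopping_strings=[], is_chat=False):
    answer = a  # no more tuple/str branching: every item is a string

print(answer[len(prompt):])  # the API returns only the continuation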
@@ -26,19 +26,14 @@ async def _handle_connection(websocket, path):
         generate_params['stream'] = True
 
         generator = generate_reply(
-            prompt, generate_params, stopping_strings=stopping_strings)
+            prompt, generate_params, stopping_strings=stopping_strings, is_chat=False)
 
         # As we stream, only send the new bytes.
-        skip_index = len(prompt) if not shared.is_chat() else 0
+        skip_index = len(prompt)
         message_num = 0
 
         for a in generator:
-            to_send = ''
-            if isinstance(a, str):
-                to_send = a[skip_index:]
-            else:
-                to_send = a[0][skip_index:]
-
+            to_send = a[skip_index:]
             await websocket.send(json.dumps({
                 'event': 'text_stream',
                 'message_num': message_num,
@@ -3,9 +3,7 @@ from pathlib import Path
 
 import elevenlabs
 import gradio as gr
 
 from modules import chat, shared
-from modules.html_generator import chat_html_wrapper
 
 params = {
     'activate': True,
@@ -31,14 +29,12 @@ def refresh_voices_dd():
     return gr.Dropdown.update(value=all_voices[0], choices=all_voices)
 
 
-def remove_tts_from_history(name1, name2, mode, style):
+def remove_tts_from_history():
     for i, entry in enumerate(shared.history['internal']):
         shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
-
 
-def toggle_text_in_history(name1, name2, mode, style):
+def toggle_text_in_history():
     for i, entry in enumerate(shared.history['visible']):
         visible_reply = entry[1]
         if visible_reply.startswith('<audio'):
@@ -52,8 +48,6 @@ def toggle_text_in_history(name1, name2, mode, style):
                 shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"
             ]
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
-
 
 def remove_surrounded_chars(string):
     # this expression matches to 'as few symbols as possible (0 upwards) between any asterisks' OR
@@ -152,22 +146,23 @@ def ui():
 
     # Convert history with confirmation
     convert_arr = [convert_confirm, convert, convert_cancel]
-    convert.click(
-        lambda: [gr.update(visible=True), gr.update(visible=False),
-                 gr.update(visible=True)], None, convert_arr
-    )
+    convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
     convert_confirm.click(
-        lambda: [gr.update(visible=False), gr.update(visible=True),
-                 gr.update(visible=False)], None, convert_arr
-    )
-    convert_confirm.click(
-        remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display']
-    )
-    convert_confirm.click(chat.save_history, shared.gradio['mode'], [], show_progress=False)
-    convert_cancel.click(
-        lambda: [gr.update(visible=False), gr.update(visible=True),
-                 gr.update(visible=False)], None, convert_arr
-    )
+        lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(
+        remove_tts_from_history, None, None).then(
+        chat.save_history, shared.gradio['mode'], None, show_progress=False).then(
+        chat.redraw_html, shared.reload_inputs, shared.gradio['display'])
+
+    convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
+
+    # Toggle message text in history
+    show_text.change(
+        lambda x: params.update({"show_text": x}), show_text, None).then(
+        toggle_text_in_history, None, None).then(
+        chat.save_history, shared.gradio['mode'], None, show_progress=False).then(
+        chat.redraw_html, shared.reload_inputs, shared.gradio['display'])
 
     # Event functions to update the parameters in the backend
     activate.change(lambda x: params.update({'activate': x}), activate, None)
@@ -175,11 +170,5 @@ def ui():
     api_key.change(lambda x: params.update({'api_key': x}), api_key, None)
     # connect.click(check_valid_api, [], connection_status)
     refresh.click(refresh_voices_dd, [], voice)
-    # Toggle message text in history
-    show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
-    show_text.change(
-        toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display']
-    )
-    show_text.change(chat.save_history, shared.gradio['mode'], [], show_progress=False)
     # Event functions to update the parameters in the backend
     autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None)
@@ -43,5 +43,5 @@ def ui():
     picture_select.upload(
         lambda picture, name1, name2: input_hijack.update({"state": True, "value": generate_chat_picture(picture, name1, name2)}), [picture_select, shared.gradio['name1'], shared.gradio['name2']], None).then(
         gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
-        chat.cai_chatbot_wrapper, shared.input_params, shared.gradio['display'], show_progress=False).then(
+        chat.generate_chat_reply_wrapper, shared.input_params, shared.gradio['display'], show_progress=False).then(
         lambda: None, None, picture_select, show_progress=False)
@@ -3,9 +3,9 @@ from pathlib import Path
 
 import gradio as gr
 import torch
-from extensions.silero_tts import tts_preprocessor
 from modules import chat, shared
-from modules.html_generator import chat_html_wrapper
+
+from extensions.silero_tts import tts_preprocessor
 
 torch._C._jit_set_profiling_mode(False)
 
@@ -56,14 +56,12 @@ def load_model():
     return model
 
 
-def remove_tts_from_history(name1, name2, mode, style):
+def remove_tts_from_history():
    for i, entry in enumerate(shared.history['internal']):
        shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
-
 
-def toggle_text_in_history(name1, name2, mode, style):
+def toggle_text_in_history():
     for i, entry in enumerate(shared.history['visible']):
         visible_reply = entry[1]
         if visible_reply.startswith('<audio'):
@@ -73,8 +71,6 @@ def toggle_text_in_history(name1, name2, mode, style):
             else:
                 shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"]
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
-
 
 def state_modifier(state):
     state['stream'] = False
@@ -169,15 +165,20 @@ def ui():
     # Convert history with confirmation
     convert_arr = [convert_confirm, convert, convert_cancel]
     convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
-    convert_confirm.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
-    convert_confirm.click(remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'])
-    convert_confirm.click(chat.save_history, shared.gradio['mode'], [], show_progress=False)
+    convert_confirm.click(
+        lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(
+        remove_tts_from_history, None, None).then(
+        chat.save_history, shared.gradio['mode'], None, show_progress=False).then(
+        chat.redraw_html, shared.reload_inputs, shared.gradio['display'])
 
     convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
 
     # Toggle message text in history
-    show_text.change(lambda x: params.update({"show_text": x}), show_text, None)
-    show_text.change(toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'])
-    show_text.change(chat.save_history, shared.gradio['mode'], [], show_progress=False)
+    show_text.change(
+        lambda x: params.update({"show_text": x}), show_text, None).then(
+        toggle_text_in_history, None, None).then(
+        chat.save_history, shared.gradio['mode'], None, show_progress=False).then(
+        chat.redraw_html, shared.reload_inputs, shared.gradio['display'])
 
     # Event functions to update the parameters in the backend
     activate.change(lambda x: params.update({"activate": x}), activate, None)
@@ -188,7 +188,7 @@ def chatbot_wrapper(text, state, regenerate=False, _continue=False):
     # Generate
     for i in range(state['chat_generation_attempts']):
         reply = None
-        for j, reply in enumerate(generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings)):
+        for j, reply in enumerate(generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings, is_chat=True)):
             reply = cumulative_reply + reply
 
             # Extracting the reply
@@ -242,7 +242,7 @@ def impersonate_wrapper(text, state):
     cumulative_reply = text
     for i in range(state['chat_generation_attempts']):
         reply = None
-        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings):
+        for reply in generate_reply(f"{prompt}{' ' if len(cumulative_reply) > 0 else ''}{cumulative_reply}", state, eos_token=eos_token, stopping_strings=stopping_strings, is_chat=True):
             reply = cumulative_reply + reply
             reply, next_character_found = extract_message_from_reply(reply, state)
             yield reply
@@ -255,35 +255,31 @@ def impersonate_wrapper(text, state):
             yield reply
 
 
-def cai_chatbot_wrapper(text, state):
-    for history in chatbot_wrapper(text, state):
-        yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
-
-
-def regenerate_wrapper(text, state):
-    if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
-        yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'], state['chat_style'])
-    else:
-        for history in chatbot_wrapper('', state, regenerate=True):
-            yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
-
-
-def continue_wrapper(text, state):
-    if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
-        yield chat_html_wrapper(shared.history['visible'], state['name1'], state['name2'], state['mode'], state['chat_style'])
-    else:
-        for history in chatbot_wrapper('', state, _continue=True):
-            yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
-
-
-def remove_last_message(name1, name2, mode, style):
+def generate_chat_reply(text, state, regenerate=False, _continue=False):
+    if regenerate or _continue:
+        text = ''
+        if (len(shared.history['visible']) == 1 and not shared.history['visible'][0][0]) or len(shared.history['internal']) == 0:
+            yield shared.history['visible']
+            return
+
+    for history in chatbot_wrapper(text, state, regenerate=regenerate, _continue=_continue):
+        yield history
+
+
+# Same as above but returns HTML
+def generate_chat_reply_wrapper(text, state, regenerate=False, _continue=False):
+    for history in generate_chat_reply(text, state, regenerate, _continue):
+        yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style'])
+
+
+def remove_last_message():
     if len(shared.history['visible']) > 0 and shared.history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>':
         last = shared.history['visible'].pop()
         shared.history['internal'].pop()
     else:
         last = ['', '']
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style), last[0]
+    return last[0]
 
 
 def send_last_reply_to_input():
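The hunk above replaces cai_chatbot_wrapper, regenerate_wrapper and continue_wrapper with a single generate_chat_reply generator plus an HTML wrapper. A minimal sketch of how one generator can back all three buttons (chatbot_wrapper is stubbed out here and the handler names are hypothetical; only the flag handling mirrors the diff):

from functools import partial

# Stub standing in for modules.chat.chatbot_wrapper, which streams updated
# [[user, bot], ...] history pairs as the model generates.
def chatbot_wrapper(text, state, regenerate=False, _continue=False):
    yield [["hello", "hi there"]]

def generate_chat_reply(text, state, regenerate=False, _continue=False):
    # Regenerate/Continue reuse the same path; the textbox contents are ignored.
    if regenerate or _continue:
        text = ''
    for history in chatbot_wrapper(text, state, regenerate=regenerate, _continue=_continue):
        yield history  # raw history; HTML rendering is left to the wrapper

# server.py can now wire one function to three buttons, varying only the flags:
on_generate = generate_chat_reply
on_regenerate = partial(generate_chat_reply, regenerate=True)
on_continue = partial(generate_chat_reply, _continue=True)

for history in on_regenerate('ignored', {}):
    print(history)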
@@ -293,35 +289,27 @@ def send_last_reply_to_input():
     return ''
 
 
-def replace_last_reply(text, name1, name2, mode, style):
+def replace_last_reply(text):
     if len(shared.history['visible']) > 0:
         shared.history['visible'][-1][1] = text
         shared.history['internal'][-1][1] = apply_extensions("input", text)
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
-
 
-def send_dummy_message(text, name1, name2, mode, style):
+def send_dummy_message(text):
     shared.history['visible'].append([text, ''])
     shared.history['internal'].append([apply_extensions("input", text), ''])
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
-def send_dummy_reply(text, name1, name2, mode, style):
+def send_dummy_reply(text):
     if len(shared.history['visible']) > 0 and not shared.history['visible'][-1][1] == '':
         shared.history['visible'].append(['', ''])
         shared.history['internal'].append(['', ''])
 
     shared.history['visible'][-1][1] = text
     shared.history['internal'][-1][1] = apply_extensions("input", text)
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
 
 
-def clear_html():
-    return chat_html_wrapper([], "", "")
-
-
-def clear_chat_log(name1, name2, greeting, mode, style):
+def clear_chat_log(greeting, mode):
     shared.history['visible'] = []
     shared.history['internal'] = []
 
@@ -332,14 +320,12 @@ def clear_chat_log(name1, name2, greeting, mode, style):
 
     save_history(mode)
 
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
-
 
-def redraw_html(name1, name2, mode, style):
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
+def redraw_html(name1, name2, mode, style, reset_cache=False):
+    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style, reset_cache=reset_cache)
 
 
-def tokenize_dialogue(dialogue, name1, name2, mode, style):
+def tokenize_dialogue(dialogue, name1, name2):
     history = []
     messages = []
     dialogue = re.sub('<START>', '', dialogue)
@@ -447,7 +433,7 @@ def generate_pfp_cache(character):
     return None
 
 
-def load_character(character, name1, name2, mode, style):
+def load_character(character, name1, name2, mode):
     shared.character = character
     context = greeting = turn_template = ""
     greeting_field = 'greeting'
@@ -521,7 +507,7 @@ def load_character(character, name1, name2, mode, style):
         # Create .json log files since they don't already exist
         save_history(mode)
 
-    return name1, name2, picture, greeting, context, repr(turn_template)[1:-1], chat_html_wrapper(shared.history['visible'], name1, name2, mode, style)
+    return name1, name2, picture, greeting, context, repr(turn_template)[1:-1]
 
 
 def upload_character(json_file, img, tavern=False):
@@ -556,7 +542,7 @@ def upload_tavern_character(img, name1, name2):
     return upload_character(json.dumps(_json), img, tavern=True)
 
 
-def upload_your_profile_picture(img, name1, name2, mode, style):
+def upload_your_profile_picture(img):
     cache_folder = Path("cache")
     if not cache_folder.exists():
         cache_folder.mkdir()
@@ -568,5 +554,3 @@ def upload_your_profile_picture(img, name1, name2, mode, style):
         img = make_thumbnail(img)
         img.save(Path('cache/pfp_me.png'))
         logging.info('Profile picture saved to "cache/pfp_me.png"')
-
-    return chat_html_wrapper(shared.history['visible'], name1, name2, mode, style, reset_cache=True)
@@ -101,10 +101,10 @@ def fix_galactica(s):
     return s
 
 
-def get_reply_from_output_ids(output_ids, input_ids, original_question, state):
+def get_reply_from_output_ids(output_ids, input_ids, original_question, state, is_chat=False):
     if shared.model_type == 'HF_seq2seq':
         reply = decode(output_ids, state['skip_special_tokens'])
-        if not shared.is_chat():
+        if not is_chat:
             reply = apply_extensions('output', reply)
     else:
         new_tokens = len(output_ids) - len(input_ids[0])
@@ -114,14 +114,13 @@ def get_reply_from_output_ids(output_ids, input_ids, original_question, state):
         if len(original_question) > 0 and original_question[-1] not in [' ', '\n']:
             reply = ' ' + reply
 
-        if not shared.is_chat():
+        if not is_chat:
             reply = original_question + apply_extensions('output', reply)
 
     return reply
 
 
 def formatted_outputs(reply, model_name):
-    if not shared.is_chat():
     if shared.model_type == 'galactica':
         reply = fix_galactica(reply)
         return reply, reply, generate_basic_html(reply)
@@ -130,8 +129,6 @@ def formatted_outputs(reply, model_name):
         return reply, 'Only applicable for GALACTICA models.', generate_4chan_html(reply)
     else:
         return reply, 'Only applicable for GALACTICA models.', generate_basic_html(reply)
-    else:
-        return reply
 
 
 def set_manual_seed(seed):
@@ -150,13 +147,18 @@ def stop_everything_event():
     shared.stop_everything = True
 
 
-def generate_reply(question, state, eos_token=None, stopping_strings=None):
+def generate_reply_wrapper(question, state, eos_token=None, stopping_strings=None):
+    for reply in generate_reply(question, state, eos_token, stopping_strings, is_chat=False):
+        yield formatted_outputs(reply, shared.model_name)
+
+
+def generate_reply(question, state, eos_token=None, stopping_strings=None, is_chat=False):
     state = apply_extensions('state', state)
     generate_func = apply_extensions('custom_generate_reply')
     if generate_func is None:
         if shared.model_name == 'None' or shared.model is None:
             logging.error("No model is loaded! Select one in the Model tab.")
-            yield formatted_outputs(question, shared.model_name)
+            yield question
             return
 
     if shared.model_type in ['rwkv', 'llamacpp']:
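This hunk splits the old generate_reply into a raw generator plus generate_reply_wrapper, which re-applies formatted_outputs for the notebook/default UI. A toy sketch of the split, with stand-in bodies (only the function names and the is_chat flag come from the diff):

# Toy stand-ins for formatted_outputs() and the generation loop; only the
# wrapper/raw split mirrors the refactor above.
def formatted_outputs(reply, model_name):
    return reply, reply, f"<p>{reply}</p>"  # text, text, basic HTML

def generate_reply(question, state, eos_token=None, stopping_strings=None, is_chat=False):
    # Chat callers pass is_chat=True and get the raw text back; every caller
    # now receives plain strings rather than pre-formatted tuples.
    yield question + " 42."

def generate_reply_wrapper(question, state, eos_token=None, stopping_strings=None):
    # Only the Gradio notebook/default tabs go through this wrapper, which
    # re-applies formatted_outputs() for display.
    for reply in generate_reply(question, state, eos_token, stopping_strings, is_chat=False):
        yield formatted_outputs(reply, "stub-model")

for text, _, html in generate_reply_wrapper("The answer is", {}):
    print(text, html)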
@@ -168,7 +170,7 @@ def generate_reply(question, state, eos_token=None, stopping_strings=None):
 
     # Preparing the input
     original_question = question
-    if not shared.is_chat():
+    if not is_chat:
         question = apply_extensions('input', question)
 
     if shared.args.verbose:
@@ -177,11 +179,11 @@ def generate_reply(question, state, eos_token=None, stopping_strings=None):
     shared.stop_everything = False
     clear_torch_cache()
     seed = set_manual_seed(state['seed'])
-    for reply in generate_func(question, original_question, seed, state, eos_token, stopping_strings):
-        yield formatted_outputs(reply, shared.model_name)
+    for reply in generate_func(question, original_question, seed, state, eos_token, stopping_strings, is_chat=is_chat):
+        yield reply
 
 
-def generate_reply_HF(question, original_question, seed, state, eos_token=None, stopping_strings=None):
+def generate_reply_HF(question, original_question, seed, state, eos_token=None, stopping_strings=None, is_chat=False):
     generate_params = {}
     for k in ['max_new_tokens', 'do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']:
         generate_params[k] = state[k]
@@ -233,7 +235,7 @@ def generate_reply_HF(question, original_question, seed, state, eos_token=None,
 
     t0 = time.time()
     try:
-        if not shared.is_chat() and shared.model_type != 'HF_seq2seq':
+        if not is_chat and shared.model_type != 'HF_seq2seq':
             yield original_question
 
         # Generate the entire reply at once.
@@ -246,7 +248,7 @@ def generate_reply_HF(question, original_question, seed, state, eos_token=None,
             if shared.soft_prompt:
                 output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
 
-            yield get_reply_from_output_ids(output, input_ids, original_question, state)
+            yield get_reply_from_output_ids(output, input_ids, original_question, state, is_chat=is_chat)
 
         # Stream the reply 1 token at a time.
         # This is based on the trick of using 'stopping_criteria' to create an iterator.
@@ -266,7 +268,7 @@ def generate_reply_HF(question, original_question, seed, state, eos_token=None,
                 if shared.soft_prompt:
                     output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:]))
 
-                yield get_reply_from_output_ids(output, input_ids, original_question, state)
+                yield get_reply_from_output_ids(output, input_ids, original_question, state, is_chat=is_chat)
                 if output[-1] in eos_token_ids:
                     break
 
@@ -280,7 +282,7 @@ def generate_reply_HF(question, original_question, seed, state, eos_token=None,
         return
 
 
-def generate_reply_custom(question, original_question, seed, state, eos_token=None, stopping_strings=None):
+def generate_reply_custom(question, original_question, seed, state, eos_token=None, stopping_strings=None, is_chat=False):
     seed = set_manual_seed(state['seed'])
     generate_params = {'token_count': state['max_new_tokens']}
     for k in ['temperature', 'top_p', 'top_k', 'repetition_penalty']:
@@ -288,13 +290,13 @@ def generate_reply_custom(question, original_question, seed, state, eos_token=No
 
     t0 = time.time()
     try:
-        if not shared.is_chat():
+        if not is_chat:
             yield question
 
         if not state['stream']:
             reply = shared.model.generate(context=question, **generate_params)
             output = original_question + reply
-            if not shared.is_chat():
+            if not is_chat:
                 reply = original_question + apply_extensions('output', reply)
 
             yield reply
@@ -302,7 +304,7 @@ def generate_reply_custom(question, original_question, seed, state, eos_token=No
 
             for reply in shared.model.generate_with_streaming(context=question, **generate_params):
                 output = original_question + reply
-                if not shared.is_chat():
+                if not is_chat:
                     reply = original_question + apply_extensions('output', reply)
 
                 yield reply
@@ -317,7 +319,7 @@ def generate_reply_custom(question, original_question, seed, state, eos_token=No
         return
 
 
-def generate_reply_flexgen(question, original_question, seed, state, eos_token=None, stopping_strings=None):
+def generate_reply_flexgen(question, original_question, seed, state, eos_token=None, stopping_strings=None, is_chat=False):
     generate_params = {}
     for k in ['max_new_tokens', 'do_sample', 'temperature']:
         generate_params[k] = state[k]
@@ -346,7 +348,7 @@ def generate_reply_flexgen(question, original_question, seed, state, eos_token=N
 
     t0 = time.time()
     try:
-        if not shared.is_chat():
+        if not is_chat:
             yield question
 
         # Generate the entire reply at once.
@@ -354,7 +356,7 @@ def generate_reply_flexgen(question, original_question, seed, state, eos_token=N
             with torch.no_grad():
                 output = shared.model.generate(**generate_params)[0]
 
-            yield get_reply_from_output_ids(output, input_ids, original_question, state)
+            yield get_reply_from_output_ids(output, input_ids, original_question, state, is_chat=is_chat)
 
         # Stream the output naively for FlexGen since it doesn't support 'stopping_criteria'
         else:

server.py
@@ -48,7 +48,7 @@ from modules import chat, shared, training, ui, utils
 from modules.html_generator import chat_html_wrapper
 from modules.LoRA import add_lora_to_model
 from modules.models import load_model, load_soft_prompt, unload_model
-from modules.text_generation import generate_reply, get_encoded_length, stop_everything_event
+from modules.text_generation import generate_reply_wrapper, get_encoded_length, stop_everything_event
 
 
 def load_model_wrapper(selected_model, autoload=False):
@@ -723,26 +723,26 @@ def create_interface():
         gen_events.append(shared.gradio['Generate'].click(
             ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
             lambda x: (x, ''), shared.gradio['textbox'], [shared.gradio['Chat input'], shared.gradio['textbox']], show_progress=False).then(
-            chat.cai_chatbot_wrapper, shared.input_params, shared.gradio['display'], show_progress=False).then(
+            chat.generate_chat_reply_wrapper, shared.input_params, shared.gradio['display'], show_progress=False).then(
             chat.save_history, shared.gradio['mode'], None, show_progress=False)
         )
 
         gen_events.append(shared.gradio['textbox'].submit(
             ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
             lambda x: (x, ''), shared.gradio['textbox'], [shared.gradio['Chat input'], shared.gradio['textbox']], show_progress=False).then(
-            chat.cai_chatbot_wrapper, shared.input_params, shared.gradio['display'], show_progress=False).then(
+            chat.generate_chat_reply_wrapper, shared.input_params, shared.gradio['display'], show_progress=False).then(
             chat.save_history, shared.gradio['mode'], None, show_progress=False)
        )
 
         gen_events.append(shared.gradio['Regenerate'].click(
             ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
-            chat.regenerate_wrapper, shared.input_params, shared.gradio['display'], show_progress=False).then(
+            partial(chat.generate_chat_reply_wrapper, regenerate=True), shared.input_params, shared.gradio['display'], show_progress=False).then(
             chat.save_history, shared.gradio['mode'], None, show_progress=False)
         )
 
         gen_events.append(shared.gradio['Continue'].click(
             ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
-            chat.continue_wrapper, shared.input_params, shared.gradio['display'], show_progress=False).then(
+            partial(chat.generate_chat_reply_wrapper, _continue=True), shared.input_params, shared.gradio['display'], show_progress=False).then(
             chat.save_history, shared.gradio['mode'], None, show_progress=False)
         )
 
@@ -753,24 +753,28 @@ def create_interface():
         )
 
         shared.gradio['Replace last reply'].click(
-            chat.replace_last_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'], show_progress=False).then(
-            lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then(
-            chat.save_history, shared.gradio['mode'], None, show_progress=False)
+            chat.replace_last_reply, shared.gradio['textbox'], None).then(
+            lambda: '', None, shared.gradio['textbox'], show_progress=False).then(
+            chat.save_history, shared.gradio['mode'], None, show_progress=False).then(
+            chat.redraw_html, reload_inputs, shared.gradio['display'])
 
         shared.gradio['Send dummy message'].click(
-            chat.send_dummy_message, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'], show_progress=False).then(
-            lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then(
-            chat.save_history, shared.gradio['mode'], None, show_progress=False)
+            chat.send_dummy_message, shared.gradio['textbox'], None).then(
+            lambda: '', None, shared.gradio['textbox'], show_progress=False).then(
+            chat.save_history, shared.gradio['mode'], None, show_progress=False).then(
+            chat.redraw_html, reload_inputs, shared.gradio['display'])
 
         shared.gradio['Send dummy reply'].click(
-            chat.send_dummy_reply, [shared.gradio[k] for k in ['textbox', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'], show_progress=False).then(
-            lambda x: '', shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False).then(
-            chat.save_history, shared.gradio['mode'], None, show_progress=False)
+            chat.send_dummy_reply, shared.gradio['textbox'], None).then(
+            lambda: '', None, shared.gradio['textbox'], show_progress=False).then(
+            chat.save_history, shared.gradio['mode'], None, show_progress=False).then(
+            chat.redraw_html, reload_inputs, shared.gradio['display'])
 
         shared.gradio['Clear history-confirm'].click(
             lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr).then(
-            chat.clear_chat_log, [shared.gradio[k] for k in ['name1', 'name2', 'greeting', 'mode', 'chat_style']], shared.gradio['display']).then(
-            chat.save_history, shared.gradio['mode'], None, show_progress=False)
+            chat.clear_chat_log, [shared.gradio[k] for k in ['greeting', 'mode']], None).then(
+            chat.save_history, shared.gradio['mode'], None, show_progress=False).then(
+            chat.redraw_html, reload_inputs, shared.gradio['display'])
 
         shared.gradio['Stop'].click(
             stop_everything_event, None, None, queue=False, cancels=gen_events if shared.args.no_stream else None).then(
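The chains above all follow the same shape: a callback mutates shared.history, chat.save_history persists it, and chat.redraw_html renders the log once at the end. A self-contained Gradio sketch of that pattern, with hypothetical stand-ins for the chat module functions:

import gradio as gr

history = {'visible': []}  # stand-in for shared.history

def send_dummy_message(text):
    history['visible'].append([text, ''])  # mutate state only; no HTML returned

def save_history():
    pass  # stand-in for chat.save_history(mode)

def redraw_html():
    # stand-in for chat.redraw_html(), the single place that renders the log
    return "<br>".join(f"<b>you:</b> {u}" for u, _ in history['visible'])

with gr.Blocks() as demo:
    display = gr.HTML()
    textbox = gr.Textbox()
    send = gr.Button("Send dummy message")
    # the chained pattern from the diff: mutate -> save -> redraw
    send.click(send_dummy_message, textbox, None).then(
        save_history, None, None, show_progress=False).then(
        redraw_html, None, display)

# demo.launch()  # uncomment to try the sketch locally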
@@ -783,8 +787,7 @@ def create_interface():
 
         shared.gradio['chat_style'].change(chat.redraw_html, reload_inputs, shared.gradio['display'])
         shared.gradio['instruction_template'].change(
-            chat.load_character, [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'mode', 'chat_style']], [shared.gradio[k] for k in ['name1_instruct', 'name2_instruct', 'dummy', 'dummy', 'context_instruct', 'turn_template', 'display']]).then(
-            chat.redraw_html, reload_inputs, shared.gradio['display'])
+            chat.load_character, [shared.gradio[k] for k in ['instruction_template', 'name1_instruct', 'name2_instruct', 'mode']], [shared.gradio[k] for k in ['name1_instruct', 'name2_instruct', 'dummy', 'dummy', 'context_instruct', 'turn_template']])
 
         shared.gradio['upload_chat_history'].upload(
             chat.load_history, [shared.gradio[k] for k in ['upload_chat_history', 'name1', 'name2']], None).then(
@@ -793,12 +796,22 @@ def create_interface():
         shared.gradio['Copy last reply'].click(chat.send_last_reply_to_input, None, shared.gradio['textbox'], show_progress=False)
         shared.gradio['Clear history'].click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, clear_arr)
         shared.gradio['Clear history-cancel'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr)
-        shared.gradio['Remove last'].click(chat.remove_last_message, [shared.gradio[k] for k in ['name1', 'name2', 'mode', 'chat_style']], [shared.gradio['display'], shared.gradio['textbox']], show_progress=False)
+        shared.gradio['Remove last'].click(
+            chat.remove_last_message, None, shared.gradio['textbox'], show_progress=False).then(
+            chat.save_history, shared.gradio['mode'], None, show_progress=False).then(
+            chat.redraw_html, reload_inputs, shared.gradio['display'])
+
         shared.gradio['download_button'].click(lambda x: chat.save_history(x, timestamp=True), shared.gradio['mode'], shared.gradio['download'])
         shared.gradio['Upload character'].click(chat.upload_character, [shared.gradio['upload_json'], shared.gradio['upload_img_bot']], [shared.gradio['character_menu']])
-        shared.gradio['character_menu'].change(chat.load_character, [shared.gradio[k] for k in ['character_menu', 'name1', 'name2', 'mode', 'chat_style']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'turn_template', 'display']])
+        shared.gradio['character_menu'].change(
+            chat.load_character, [shared.gradio[k] for k in ['character_menu', 'name1', 'name2', 'mode']], [shared.gradio[k] for k in ['name1', 'name2', 'character_picture', 'greeting', 'context', 'dummy']]).then(
+            chat.redraw_html, reload_inputs, shared.gradio['display'])
+
         shared.gradio['upload_img_tavern'].upload(chat.upload_tavern_character, [shared.gradio['upload_img_tavern'], shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['character_menu']])
-        shared.gradio['your_picture'].change(chat.upload_your_profile_picture, [shared.gradio[k] for k in ['your_picture', 'name1', 'name2', 'mode', 'chat_style']], shared.gradio['display'])
+        shared.gradio['your_picture'].change(
+            chat.upload_your_profile_picture, shared.gradio['your_picture'], None).then(
+            partial(chat.redraw_html, reset_cache=True), reload_inputs, shared.gradio['display'])
 
         shared.gradio['interface'].load(None, None, None, _js=f"() => {{{ui.main_js+ui.chat_js}}}")
 
         # notebook/default modes event handlers
@@ -812,14 +825,14 @@ def create_interface():
         gen_events.append(shared.gradio['Generate'].click(
             lambda x: x, shared.gradio['textbox'], shared.gradio['last_input']).then(
             ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
-            generate_reply, shared.input_params, output_params, show_progress=False) # .then(
+            generate_reply_wrapper, shared.input_params, output_params, show_progress=False) # .then(
             # None, None, None, _js="() => {element = document.getElementsByTagName('textarea')[0]; element.scrollTop = element.scrollHeight}")
         )
 
         gen_events.append(shared.gradio['textbox'].submit(
             lambda x: x, shared.gradio['textbox'], shared.gradio['last_input']).then(
             ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
-            generate_reply, shared.input_params, output_params, show_progress=False) # .then(
+            generate_reply_wrapper, shared.input_params, output_params, show_progress=False) # .then(
             # None, None, None, _js="() => {element = document.getElementsByTagName('textarea')[0]; element.scrollTop = element.scrollHeight}")
         )
 
@@ -828,13 +841,13 @@ def create_interface():
         gen_events.append(shared.gradio['Regenerate'].click(
             lambda x: x, shared.gradio['last_input'], shared.gradio['textbox'], show_progress=False).then(
             ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
-            generate_reply, shared.input_params, output_params, show_progress=False) # .then(
+            generate_reply_wrapper, shared.input_params, output_params, show_progress=False) # .then(
             # None, None, None, _js="() => {element = document.getElementsByTagName('textarea')[0]; element.scrollTop = element.scrollHeight}")
         )
     else:
         gen_events.append(shared.gradio['Continue'].click(
             ui.gather_interface_values, [shared.gradio[k] for k in shared.input_elements], shared.gradio['interface_state']).then(
-            generate_reply, [shared.gradio['output_textbox']] + shared.input_params[1:], output_params, show_progress=False) # .then(
+            generate_reply_wrapper, [shared.gradio['output_textbox']] + shared.input_params[1:], output_params, show_progress=False) # .then(
             # None, None, None, _js="() => {element = document.getElementsByTagName('textarea')[1]; element.scrollTop = element.scrollHeight}")
         )
 