Repository: https://github.com/oobabooga/text-generation-webui.git
Commit: 65326b545a (parent: 0a3590da8c)

Move all gradio elements to shared (so that extensions can use them)
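The one-line summary above is the whole design change: every Gradio component that server.py creates is now registered in the shared.gradio dict (added in the modules hunk below) instead of living in local variables and the old buttons dict, so extension scripts get a stable, importable handle on the UI. A minimal sketch of what this enables -- the extension name, its ui() entry point, and the event handler are illustrative assumptions, not part of this commit:

    # extensions/example/script.py -- hypothetical extension, for illustration
    import modules.shared as shared

    def ui():
        # Called while the gr.Blocks() context is still open, so extra event
        # handlers can be attached to components registered by server.py.
        generate_btn = shared.gradio['Generate']
        generate_btn.click(lambda: print('Generate was clicked'), None, None)
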
@@ -253,7 +253,7 @@ def tokenize_dialogue(dialogue, name1, name2):
             _history.append(entry)
             entry = ['', '']
 
-    print(f"\033[1;32;1m\nDialogue tokenized to:\033[0;37;0m\n", end='')
+    print("\033[1;32;1m\nDialogue tokenized to:\033[0;37;0m\n", end='')
     for row in _history:
         for column in row:
             print("\n")

@@ -301,8 +301,8 @@ def load_history(file, name1, name2):
         shared.history['visible'] = copy.deepcopy(shared.history['internal'])
 
 def load_default_history(name1, name2):
-    if Path(f'logs/persistent.json').exists():
-        load_history(open(Path(f'logs/persistent.json'), 'rb').read(), name1, name2)
+    if Path('logs/persistent.json').exists():
+        load_history(open(Path('logs/persistent.json'), 'rb').read(), name1, name2)
     else:
         shared.history['internal'] = []
         shared.history['visible'] = []

@@ -370,5 +370,5 @@ def upload_tavern_character(img, name1, name2):
 
 def upload_your_profile_picture(img):
     img = Image.open(io.BytesIO(img))
-    img.save(Path(f'img_me.png'))
-    print(f'Profile picture saved to "img_me.png"')
+    img.save(Path('img_me.png'))
+    print('Profile picture saved to "img_me.png"')

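Every change in the hunks above is the same mechanical cleanup: the f prefix is dropped from string literals that contain no {} placeholders (the pattern pyflakes flags as F541). The two forms are identical at runtime; the plain literal just stops suggesting interpolation that never happens:

    # Equivalent at runtime; only the second form is idiomatic.
    print(f'Profile picture saved to "img_me.png"')  # f-string without placeholders
    print('Profile picture saved to "img_me.png"')   # plain string literal
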
@@ -13,7 +13,7 @@ def load_extensions():
             print(f'Loading the extension "{name}"... ', end='')
             exec(f"import extensions.{name}.script")
             state[name] = [True, i]
-            print(f'Ok.')
+            print('Ok.')
 
 # This iterator returns the extensions in the order specified in the command-line
 def iterator():

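load_extensions() pulls each extension in by exec-ing an import statement built from the extension's name. For comparison, a sketch of the same dynamic import done with importlib, which avoids exec and returns the module object directly (a possible alternative, not what this commit does):

    import importlib

    def load_extension_module(name):
        # Equivalent to exec(f"import extensions.{name}.script"), but the
        # module is returned instead of having to be dug out of sys.modules.
        return importlib.import_module(f'extensions.{name}.script')
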
@@ -117,7 +117,7 @@ def load_model(model_name):
         model = eval(command)
 
     # Loading the tokenizer
-    if shared.model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) and Path(f"models/gpt-j-6B/").exists():
+    if shared.model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) and Path("models/gpt-j-6B/").exists():
         tokenizer = AutoTokenizer.from_pretrained(Path("models/gpt-j-6B/"))
     else:
         tokenizer = AutoTokenizer.from_pretrained(Path(f"models/{shared.model_name}/"))

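The tokenizer branch above encodes one special case: gpt4chan variants are GPT-J fine-tunes, so when the models/gpt-j-6B/ directory exists its tokenizer is borrowed instead of expecting one inside the gpt4chan model folder. Condensed into a standalone helper (a sketch; the real code inlines this logic):

    from pathlib import Path

    def tokenizer_dir(model_name):
        # gpt4chan variants reuse the GPT-J-6B tokenizer when it is available.
        if model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')) and Path('models/gpt-j-6B/').exists():
            return Path('models/gpt-j-6B/')
        return Path(f'models/{model_name}/')
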
@@ -11,6 +11,9 @@ history = {'internal': [], 'visible': []}
 character = 'None'
 stop_everything = False
 
+# UI elements (buttons, sliders, HTML, etc)
+gradio = {}
+
 settings = {
     'max_new_tokens': 200,
     'max_new_tokens_min': 1,

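shared.gradio starts life as a plain dict; the convention established in server.py below is write-on-create, read-by-key. A toy sketch of that registry pattern in isolation (the registry name and components here are illustrative, not from the commit):

    import gradio as gr

    registry = {}  # stands in for shared.gradio

    with gr.Blocks() as demo:
        # Creation and registration happen in one assignment...
        registry['textbox'] = gr.Textbox(label='Input')
        registry['generate'] = gr.Button('Generate')
        # ...so any later code (including extensions) can wire events by key.
        registry['generate'].click(lambda s: s.upper(), registry['textbox'], registry['textbox'])
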
@@ -126,9 +126,9 @@ def generate_reply(question, tokens, do_sample, max_new_tokens, temperature, top
     if shared.args.deepspeed:
         generate_params.append("synced_gpus=True")
     if shared.args.no_stream:
-        generate_params.append(f"max_new_tokens=tokens")
+        generate_params.append("max_new_tokens=tokens")
     else:
-        generate_params.append(f"max_new_tokens=8")
+        generate_params.append("max_new_tokens=8")
 
     if shared.soft_prompt:
         inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids)

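Note that generate_params collects keyword arguments as strings ("max_new_tokens=tokens"), not as values: elsewhere in generate_reply (outside this hunk) the list appears to be joined into a call string and evaluated, the same eval-a-command style visible in the load_model hunk above. A self-contained toy version of the pattern, with a stand-in generate function; every name here is illustrative:

    # The names referenced inside the strings must exist in the eval scope.
    def dummy_generate(**kwargs):
        return kwargs

    tokens = 200
    generate_params = ["max_new_tokens=tokens", "do_sample=True"]
    command = f"dummy_generate({', '.join(generate_params)})"
    print(eval(command))  # -> {'max_new_tokens': 200, 'do_sample': True}
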
server.py (230 additions, 230 deletions)

@@ -100,50 +100,49 @@ def create_settings_menus():
     with gr.Row():
         with gr.Column():
             with gr.Row():
-                model_menu = gr.Dropdown(choices=available_models, value=shared.model_name, label='Model')
-                ui.create_refresh_button(model_menu, lambda : None, lambda : {"choices": get_available_models()}, "refresh-button")
+                shared.gradio['model_menu'] = gr.Dropdown(choices=available_models, value=shared.model_name, label='Model')
+                ui.create_refresh_button(shared.gradio['model_menu'], lambda : None, lambda : {"choices": get_available_models()}, "refresh-button")
         with gr.Column():
             with gr.Row():
-                preset_menu = gr.Dropdown(choices=available_presets, value=shared.settings[f'preset{suffix}'] if not shared.args.flexgen else 'Naive', label='Generation parameters preset')
-                ui.create_refresh_button(preset_menu, lambda : None, lambda : {"choices": get_available_presets()}, "refresh-button")
+                shared.gradio['preset_menu'] = gr.Dropdown(choices=available_presets, value=shared.settings[f'preset{suffix}'] if not shared.args.flexgen else 'Naive', label='Generation parameters preset')
+                ui.create_refresh_button(shared.gradio['preset_menu'], lambda : None, lambda : {"choices": get_available_presets()}, "refresh-button")
 
     with gr.Accordion("Custom generation parameters", open=False, elem_id="accordion"):
         with gr.Row():
-            do_sample = gr.Checkbox(value=generate_params['do_sample'], label="do_sample")
-            temperature = gr.Slider(0.01, 1.99, value=generate_params['temperature'], step=0.01, label="temperature")
+            shared.gradio['do_sample'] = gr.Checkbox(value=generate_params['do_sample'], label="do_sample")
+            shared.gradio['temperature'] = gr.Slider(0.01, 1.99, value=generate_params['temperature'], step=0.01, label="temperature")
         with gr.Row():
-            top_k = gr.Slider(0,200,value=generate_params['top_k'],step=1,label="top_k")
-            top_p = gr.Slider(0.0,1.0,value=generate_params['top_p'],step=0.01,label="top_p")
+            shared.gradio['top_k'] = gr.Slider(0,200,value=generate_params['top_k'],step=1,label="top_k")
+            shared.gradio['top_p'] = gr.Slider(0.0,1.0,value=generate_params['top_p'],step=0.01,label="top_p")
         with gr.Row():
-            repetition_penalty = gr.Slider(1.0,4.99,value=generate_params['repetition_penalty'],step=0.01,label="repetition_penalty")
-            no_repeat_ngram_size = gr.Slider(0, 20, step=1, value=generate_params["no_repeat_ngram_size"], label="no_repeat_ngram_size")
+            shared.gradio['repetition_penalty'] = gr.Slider(1.0,4.99,value=generate_params['repetition_penalty'],step=0.01,label="repetition_penalty")
+            shared.gradio['no_repeat_ngram_size'] = gr.Slider(0, 20, step=1, value=generate_params["no_repeat_ngram_size"], label="no_repeat_ngram_size")
         with gr.Row():
-            typical_p = gr.Slider(0.0,1.0,value=generate_params['typical_p'],step=0.01,label="typical_p")
-            min_length = gr.Slider(0, 2000, step=1, value=generate_params["min_length"] if shared.args.no_stream else 0, label="min_length", interactive=shared.args.no_stream)
+            shared.gradio['typical_p'] = gr.Slider(0.0,1.0,value=generate_params['typical_p'],step=0.01,label="typical_p")
+            shared.gradio['min_length'] = gr.Slider(0, 2000, step=1, value=generate_params["min_length"] if shared.args.no_stream else 0, label="min_length", interactive=shared.args.no_stream)
 
         gr.Markdown("Contrastive search:")
-        penalty_alpha = gr.Slider(0, 5, value=generate_params["penalty_alpha"], label="penalty_alpha")
+        shared.gradio['penalty_alpha'] = gr.Slider(0, 5, value=generate_params["penalty_alpha"], label="penalty_alpha")
 
         gr.Markdown("Beam search (uses a lot of VRAM):")
         with gr.Row():
-            num_beams = gr.Slider(1, 20, step=1, value=generate_params["num_beams"], label="num_beams")
-            length_penalty = gr.Slider(-5, 5, value=generate_params["length_penalty"], label="length_penalty")
-            early_stopping = gr.Checkbox(value=generate_params["early_stopping"], label="early_stopping")
+            shared.gradio['num_beams'] = gr.Slider(1, 20, step=1, value=generate_params["num_beams"], label="num_beams")
+            shared.gradio['length_penalty'] = gr.Slider(-5, 5, value=generate_params["length_penalty"], label="length_penalty")
+            shared.gradio['early_stopping'] = gr.Checkbox(value=generate_params["early_stopping"], label="early_stopping")
 
     with gr.Accordion("Soft prompt", open=False, elem_id="accordion"):
         with gr.Row():
-            softprompts_menu = gr.Dropdown(choices=available_softprompts, value="None", label='Soft prompt')
-            ui.create_refresh_button(softprompts_menu, lambda : None, lambda : {"choices": get_available_softprompts()}, "refresh-button")
+            shared.gradio['softprompts_menu'] = gr.Dropdown(choices=available_softprompts, value="None", label='Soft prompt')
+            ui.create_refresh_button(shared.gradio['softprompts_menu'], lambda : None, lambda : {"choices": get_available_softprompts()}, "refresh-button")
 
         gr.Markdown('Upload a soft prompt (.zip format):')
         with gr.Row():
-            upload_softprompt = gr.File(type='binary', file_types=[".zip"])
+            shared.gradio['upload_softprompt'] = gr.File(type='binary', file_types=[".zip"])
 
-    model_menu.change(load_model_wrapper, [model_menu], [model_menu], show_progress=True)
-    preset_menu.change(load_preset_values, [preset_menu], [do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping])
-    softprompts_menu.change(load_soft_prompt, [softprompts_menu], [softprompts_menu], show_progress=True)
-    upload_softprompt.upload(upload_soft_prompt, [upload_softprompt], [softprompts_menu])
-    return preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping
+    shared.gradio['model_menu'].change(load_model_wrapper, [shared.gradio['model_menu']], [shared.gradio['model_menu']], show_progress=True)
+    shared.gradio['preset_menu'].change(load_preset_values, [shared.gradio['preset_menu']], [shared.gradio['do_sample'], shared.gradio['temperature'], shared.gradio['top_p'], shared.gradio['typical_p'], shared.gradio['repetition_penalty'], shared.gradio['top_k'], shared.gradio['min_length'], shared.gradio['no_repeat_ngram_size'], shared.gradio['num_beams'], shared.gradio['penalty_alpha'], shared.gradio['length_penalty'], shared.gradio['early_stopping']])
+    shared.gradio['softprompts_menu'].change(load_soft_prompt, [shared.gradio['softprompts_menu']], [shared.gradio['softprompts_menu']], show_progress=True)
+    shared.gradio['upload_softprompt'].upload(upload_soft_prompt, [shared.gradio['upload_softprompt']], [shared.gradio['softprompts_menu']])
 
 available_models = get_available_models()
 available_presets = get_available_presets()

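The payoff of the rename shows up in the event wiring at the bottom of the hunk: create_settings_menus() no longer returns a twelve-element tuple for its callers to thread around, because every component is reachable by key. The hunk still spells the preset_menu outputs out explicitly, but the shared dict also makes it possible to build such lists by comprehension, as the input_params lines later in server.py do. A sketch of that idiom (abbreviated key list; names taken from the hunk above):

    import modules.shared as shared  # assumes the webui package layout

    def wire_preset_menu(load_preset_values):
        # Build the outputs list by key lookup instead of local variables.
        param_keys = ['do_sample', 'temperature', 'top_p', 'top_k']  # abbreviated
        outputs = [shared.gradio[k] for k in param_keys]
        shared.gradio['preset_menu'].change(load_preset_values, [shared.gradio['preset_menu']], outputs)
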
@@ -174,10 +173,9 @@ else:
     shared.model, shared.tokenizer = load_model(shared.model_name)
 
 # UI settings
-buttons = {}
 gen_events = []
 suffix = '_pygmalion' if 'pygmalion' in shared.model_name.lower() else ''
-description = f"\n\n# Text generation lab\nGenerate text using Large Language Models.\n"
+description = "\n\n# Text generation lab\nGenerate text using Large Language Models.\n"
 if shared.model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')):
     default_text = shared.settings['prompt_gpt4chan']
 elif re.match('(rosey|chip|joi)_.*_instruct.*', shared.model_name.lower()) is not None:

@@ -186,176 +184,178 @@ else:
     default_text = shared.settings['prompt']
 
 if shared.args.chat or shared.args.cai_chat:
-    with gr.Blocks(css=ui.css+ui.chat_css, analytics_enabled=False) as interface:
-        interface.load(lambda : chat.load_default_history(shared.settings[f'name1{suffix}'], shared.settings[f'name2{suffix}']), None, None)
+    with gr.Blocks(css=ui.css+ui.chat_css, analytics_enabled=False) as shared.gradio['interface']:
+        shared.gradio['interface'].load(lambda : chat.load_default_history(shared.settings[f'name1{suffix}'], shared.settings[f'name2{suffix}']), None, None)
         if shared.args.cai_chat:
-            display = gr.HTML(value=generate_chat_html(shared.history['visible'], shared.settings[f'name1{suffix}'], shared.settings[f'name2{suffix}'], shared.character))
+            shared.gradio['display'] = gr.HTML(value=generate_chat_html(shared.history['visible'], shared.settings[f'name1{suffix}'], shared.settings[f'name2{suffix}'], shared.character))
         else:
-            display = gr.Chatbot(value=shared.history['visible'])
-        textbox = gr.Textbox(label='Input')
+            shared.gradio['display'] = gr.Chatbot(value=shared.history['visible'])
+        shared.gradio['textbox'] = gr.Textbox(label='Input')
         with gr.Row():
-            buttons["Stop"] = gr.Button("Stop")
-            buttons["Generate"] = gr.Button("Generate")
-            buttons["Regenerate"] = gr.Button("Regenerate")
+            shared.gradio["Stop"] = gr.Button("Stop")
+            shared.gradio["Generate"] = gr.Button("Generate")
+            shared.gradio["Regenerate"] = gr.Button("Regenerate")
         with gr.Row():
-            buttons["Impersonate"] = gr.Button("Impersonate")
-            buttons["Remove last"] = gr.Button("Remove last")
-            buttons["Clear history"] = gr.Button("Clear history")
+            shared.gradio["Impersonate"] = gr.Button("Impersonate")
+            shared.gradio["Remove last"] = gr.Button("Remove last")
+            shared.gradio["Clear history"] = gr.Button("Clear history")
         with gr.Row():
-            buttons["Send last reply to input"] = gr.Button("Send last reply to input")
-            buttons["Replace last reply"] = gr.Button("Replace last reply")
+            shared.gradio["Send last reply to input"] = gr.Button("Send last reply to input")
+            shared.gradio["Replace last reply"] = gr.Button("Replace last reply")
         if shared.args.picture:
             with gr.Row():
-                picture_select = gr.Image(label="Send a picture", type='pil')
+                shared.gradio['picture_select'] = gr.Image(label="Send a picture", type='pil')
 
         with gr.Tab("Chat settings"):
-            name1 = gr.Textbox(value=shared.settings[f'name1{suffix}'], lines=1, label='Your name')
-            name2 = gr.Textbox(value=shared.settings[f'name2{suffix}'], lines=1, label='Bot\'s name')
-            context = gr.Textbox(value=shared.settings[f'context{suffix}'], lines=2, label='Context')
+            shared.gradio['name1'] = gr.Textbox(value=shared.settings[f'name1{suffix}'], lines=1, label='Your name')
+            shared.gradio['name2'] = gr.Textbox(value=shared.settings[f'name2{suffix}'], lines=1, label='Bot\'s name')
+            shared.gradio['context'] = gr.Textbox(value=shared.settings[f'context{suffix}'], lines=2, label='Context')
             with gr.Row():
-                character_menu = gr.Dropdown(choices=available_characters, value="None", label='Character')
-                ui.create_refresh_button(character_menu, lambda : None, lambda : {"choices": get_available_characters()}, "refresh-button")
+                shared.gradio['character_menu'] = gr.Dropdown(choices=available_characters, value="None", label='Character')
+                ui.create_refresh_button(shared.gradio['character_menu'], lambda : None, lambda : {"choices": get_available_characters()}, "refresh-button")
 
             with gr.Row():
-                check = gr.Checkbox(value=shared.settings[f'stop_at_newline{suffix}'], label='Stop generating at new line character?')
+                shared.gradio['check'] = gr.Checkbox(value=shared.settings[f'stop_at_newline{suffix}'], label='Stop generating at new line character?')
             with gr.Row():
                 with gr.Tab('Chat history'):
                     with gr.Row():
                         with gr.Column():
                             gr.Markdown('Upload')
-                            upload_chat_history = gr.File(type='binary', file_types=[".json", ".txt"])
+                            shared.gradio['upload_chat_history'] = gr.File(type='binary', file_types=[".json", ".txt"])
                         with gr.Column():
                             gr.Markdown('Download')
-                            download = gr.File()
-                            buttons["Download"] = gr.Button(value="Click me")
+                            shared.gradio['download'] = gr.File()
+                            shared.gradio["Download"] = gr.Button(value="Click me")
                 with gr.Tab('Upload character'):
                     with gr.Row():
                         with gr.Column():
                             gr.Markdown('1. Select the JSON file')
-                            upload_char = gr.File(type='binary', file_types=[".json"])
+                            shared.gradio['upload_char'] = gr.File(type='binary', file_types=[".json"])
                         with gr.Column():
                             gr.Markdown('2. Select your character\'s profile picture (optional)')
-                            upload_img = gr.File(type='binary', file_types=["image"])
-                    buttons["Upload character"] = gr.Button(value="Submit")
+                            shared.gradio['upload_img'] = gr.File(type='binary', file_types=["image"])
+                    shared.gradio["Upload character"] = gr.Button(value="Submit")
                 with gr.Tab('Upload your profile picture'):
-                    upload_img_me = gr.File(type='binary', file_types=["image"])
+                    shared.gradio['upload_img_me'] = gr.File(type='binary', file_types=["image"])
                 with gr.Tab('Upload TavernAI Character Card'):
-                    upload_img_tavern = gr.File(type='binary', file_types=["image"])
+                    shared.gradio['upload_img_tavern'] = gr.File(type='binary', file_types=["image"])
 
         with gr.Tab("Generation settings"):
             with gr.Row():
                 with gr.Column():
-                    max_new_tokens = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
+                    shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
                 with gr.Column():
-                    chat_prompt_size_slider = gr.Slider(minimum=shared.settings['chat_prompt_size_min'], maximum=shared.settings['chat_prompt_size_max'], step=1, label='Maximum prompt size in tokens', value=shared.settings['chat_prompt_size'])
+                    shared.gradio['chat_prompt_size_slider'] = gr.Slider(minimum=shared.settings['chat_prompt_size_min'], maximum=shared.settings['chat_prompt_size_max'], step=1, label='Maximum prompt size in tokens', value=shared.settings['chat_prompt_size'])
 
-            preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping = create_settings_menus()
 
+            create_settings_menus()
         if shared.args.extensions is not None:
             with gr.Tab("Extensions"):
                 extensions_module.create_extensions_block()
 
-        input_params = [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, name1, name2, context, check, chat_prompt_size_slider]
+        input_params = [shared.gradio[i] for i in ['textbox', 'max_new_tokens', 'do_sample', 'max_new_tokens', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping', 'name1', 'name2', 'context', 'check', 'chat_prompt_size_slider']]
         if shared.args.picture:
-            input_params.append(picture_select)
+            input_params.append(shared.gradio['picture_select'])
         function_call = "chat.cai_chatbot_wrapper" if shared.args.cai_chat else "chat.chatbot_wrapper"
 
-        gen_events.append(buttons["Generate"].click(eval(function_call), input_params, display, show_progress=shared.args.no_stream, api_name="textgen"))
-        gen_events.append(textbox.submit(eval(function_call), input_params, display, show_progress=shared.args.no_stream))
+        gen_events.append(shared.gradio["Generate"].click(eval(function_call), input_params, shared.gradio['display'], show_progress=shared.args.no_stream, api_name="textgen"))
+        gen_events.append(shared.gradio['textbox'].submit(eval(function_call), input_params, shared.gradio['display'], show_progress=shared.args.no_stream))
         if shared.args.picture:
-            picture_select.upload(eval(function_call), input_params, display, show_progress=shared.args.no_stream)
-        gen_events.append(buttons["Regenerate"].click(chat.regenerate_wrapper, input_params, display, show_progress=shared.args.no_stream))
-        gen_events.append(buttons["Impersonate"].click(chat.impersonate_wrapper, input_params, textbox, show_progress=shared.args.no_stream))
-        buttons["Stop"].click(chat.stop_everything_event, [], [], cancels=gen_events)
+            shared.gradio['picture_select'].upload(eval(function_call), input_params, shared.gradio['display'], show_progress=shared.args.no_stream)
+        gen_events.append(shared.gradio["Regenerate"].click(chat.regenerate_wrapper, input_params, shared.gradio['display'], show_progress=shared.args.no_stream))
+        gen_events.append(shared.gradio["Impersonate"].click(chat.impersonate_wrapper, input_params, shared.gradio['textbox'], show_progress=shared.args.no_stream))
+        shared.gradio["Stop"].click(chat.stop_everything_event, [], [], cancels=gen_events)
 
-        buttons["Send last reply to input"].click(chat.send_last_reply_to_input, [], textbox, show_progress=shared.args.no_stream)
-        buttons["Replace last reply"].click(chat.replace_last_reply, [textbox, name1, name2], display, show_progress=shared.args.no_stream)
-        buttons["Clear history"].click(chat.clear_chat_log, [name1, name2], display)
-        buttons["Remove last"].click(chat.remove_last_message, [name1, name2], [display, textbox], show_progress=False)
-        buttons["Download"].click(chat.save_history, inputs=[], outputs=[download])
-        buttons["Upload character"].click(chat.upload_character, [upload_char, upload_img], [character_menu])
+        shared.gradio["Send last reply to input"].click(chat.send_last_reply_to_input, [], shared.gradio['textbox'], show_progress=shared.args.no_stream)
+        shared.gradio["Replace last reply"].click(chat.replace_last_reply, [shared.gradio['textbox'], shared.gradio['name1'], shared.gradio['name2']], shared.gradio['display'], show_progress=shared.args.no_stream)
+        shared.gradio["Clear history"].click(chat.clear_chat_log, [shared.gradio['name1'], shared.gradio['name2']], shared.gradio['display'])
+        shared.gradio["Remove last"].click(chat.remove_last_message, [shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['display'], shared.gradio['textbox']], show_progress=False)
+        shared.gradio["Download"].click(chat.save_history, inputs=[], outputs=[shared.gradio['download']])
+        shared.gradio["Upload character"].click(chat.upload_character, [shared.gradio['upload_char'], shared.gradio['upload_img']], [shared.gradio['character_menu']])
 
         # Clearing stuff and saving the history
         for i in ["Generate", "Regenerate", "Replace last reply"]:
-            buttons[i].click(lambda x: "", textbox, textbox, show_progress=False)
-            buttons[i].click(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
-        buttons["Clear history"].click(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
-        textbox.submit(lambda x: "", textbox, textbox, show_progress=False)
-        textbox.submit(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
+            shared.gradio[i].click(lambda x: "", shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False)
+            shared.gradio[i].click(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
+        shared.gradio["Clear history"].click(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
+        shared.gradio['textbox'].submit(lambda x: "", shared.gradio['textbox'], shared.gradio['textbox'], show_progress=False)
+        shared.gradio['textbox'].submit(lambda : chat.save_history(timestamp=False), [], [], show_progress=False)
 
-        character_menu.change(chat.load_character, [character_menu, name1, name2], [name2, context, display])
-        upload_chat_history.upload(chat.load_history, [upload_chat_history, name1, name2], [])
-        upload_img_tavern.upload(chat.upload_tavern_character, [upload_img_tavern, name1, name2], [character_menu])
-        upload_img_me.upload(chat.upload_your_profile_picture, [upload_img_me], [])
+        shared.gradio['character_menu'].change(chat.load_character, [shared.gradio['character_menu'], shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['name2'], shared.gradio['context'], shared.gradio['display']])
+        shared.gradio['upload_chat_history'].upload(chat.load_history, [shared.gradio['upload_chat_history'], shared.gradio['name1'], shared.gradio['name2']], [])
+        shared.gradio['upload_img_tavern'].upload(chat.upload_tavern_character, [shared.gradio['upload_img_tavern'], shared.gradio['name1'], shared.gradio['name2']], [shared.gradio['character_menu']])
+        shared.gradio['upload_img_me'].upload(chat.upload_your_profile_picture, [shared.gradio['upload_img_me']], [])
         if shared.args.picture:
-            picture_select.upload(lambda : None, [], [picture_select], show_progress=False)
+            shared.gradio['picture_select'].upload(lambda : None, [], [shared.gradio['picture_select']], show_progress=False)
 
         reload_func = chat.redraw_html if shared.args.cai_chat else lambda : shared.history['visible']
-        reload_inputs = [name1, name2] if shared.args.cai_chat else []
-        upload_chat_history.upload(reload_func, reload_inputs, [display])
-        upload_img_me.upload(reload_func, reload_inputs, [display])
-        interface.load(reload_func, reload_inputs, [display], show_progress=True)
+        reload_inputs = [shared.gradio['name1'], shared.gradio['name2']] if shared.args.cai_chat else []
+        shared.gradio['upload_chat_history'].upload(reload_func, reload_inputs, [shared.gradio['display']])
+        shared.gradio['upload_img_me'].upload(reload_func, reload_inputs, [shared.gradio['display']])
+        shared.gradio['interface'].load(reload_func, reload_inputs, [shared.gradio['display']], show_progress=True)
 
 elif shared.args.notebook:
-    with gr.Blocks(css=ui.css, analytics_enabled=False) as interface:
+    with gr.Blocks(css=ui.css, analytics_enabled=False) as shared.gradio['interface']:
         gr.Markdown(description)
         with gr.Tab('Raw'):
-            textbox = gr.Textbox(value=default_text, lines=23)
+            shared.gradio['textbox'] = gr.Textbox(value=default_text, lines=23)
         with gr.Tab('Markdown'):
-            markdown = gr.Markdown()
+            shared.gradio['markdown'] = gr.Markdown()
         with gr.Tab('HTML'):
-            html = gr.HTML()
+            shared.gradio['html'] = gr.HTML()
 
-        buttons["Generate"] = gr.Button("Generate")
-        buttons["Stop"] = gr.Button("Stop")
+        shared.gradio["Generate"] = gr.Button("Generate")
+        shared.gradio["Stop"] = gr.Button("Stop")
 
-        max_new_tokens = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
+        shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
 
-        preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping = create_settings_menus()
 
+        create_settings_menus()
         if shared.args.extensions is not None:
             extensions_module.create_extensions_block()
 
-        gen_events.append(buttons["Generate"].click(generate_reply, [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping], [textbox, markdown, html], show_progress=shared.args.no_stream, api_name="textgen"))
-        gen_events.append(textbox.submit(generate_reply, [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping], [textbox, markdown, html], show_progress=shared.args.no_stream))
-        buttons["Stop"].click(None, None, None, cancels=gen_events)
+        input_params = [shared.gradio[k] for k in ('textbox', 'max_new_tokens', 'do_sample', 'max_new_tokens', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping')]
+        output_params = [shared.gradio[k] for k in ["textbox", "markdown", "html"]]
+        gen_events.append(shared.gradio["Generate"].click(generate_reply, input_params, output_params, show_progress=shared.args.no_stream, api_name="textgen"))
+        gen_events.append(shared.gradio['textbox'].submit(generate_reply, input_params, output_params, show_progress=shared.args.no_stream))
+        shared.gradio["Stop"].click(None, None, None, cancels=gen_events)
 
 else:
-    with gr.Blocks(css=ui.css, analytics_enabled=False) as interface:
+    with gr.Blocks(css=ui.css, analytics_enabled=False) as shared.gradio['interface']:
         gr.Markdown(description)
         with gr.Row():
             with gr.Column():
-                textbox = gr.Textbox(value=default_text, lines=15, label='Input')
-                max_new_tokens = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
-                buttons["Generate"] = gr.Button("Generate")
+                shared.gradio['textbox'] = gr.Textbox(value=default_text, lines=15, label='Input')
+                shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
+                shared.gradio["Generate"] = gr.Button("Generate")
                 with gr.Row():
                     with gr.Column():
-                        buttons["Continue"] = gr.Button("Continue")
+                        shared.gradio["Continue"] = gr.Button("Continue")
                     with gr.Column():
-                        buttons["Stop"] = gr.Button("Stop")
+                        shared.gradio["Stop"] = gr.Button("Stop")
 
-                preset_menu, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping = create_settings_menus()
+                create_settings_menus()
                 if shared.args.extensions is not None:
                     extensions_module.create_extensions_block()
 
             with gr.Column():
                 with gr.Tab('Raw'):
-                    output_textbox = gr.Textbox(lines=15, label='Output')
+                    shared.gradio['output_textbox'] = gr.Textbox(lines=15, label='Output')
                 with gr.Tab('Markdown'):
-                    markdown = gr.Markdown()
+                    shared.gradio['markdown'] = gr.Markdown()
                 with gr.Tab('HTML'):
-                    html = gr.HTML()
+                    shared.gradio['html'] = gr.HTML()
 
-        gen_events.append(buttons["Generate"].click(generate_reply, [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping], [output_textbox, markdown, html], show_progress=shared.args.no_stream, api_name="textgen"))
-        gen_events.append(textbox.submit(generate_reply, [textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping], [output_textbox, markdown, html], show_progress=shared.args.no_stream))
-        gen_events.append(buttons["Continue"].click(generate_reply, [output_textbox, max_new_tokens, do_sample, max_new_tokens, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping], [output_textbox, markdown, html], show_progress=shared.args.no_stream))
-        buttons["Stop"].click(None, None, None, cancels=gen_events)
+        input_params = [shared.gradio[k] for k in ['textbox', 'max_new_tokens', 'do_sample', 'max_new_tokens', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']]
+        output_params = [shared.gradio[k] for k in ['output_textbox', 'markdown', 'html']]
+        gen_events.append(shared.gradio['Generate'].click(generate_reply, input_params, output_params, show_progress=shared.args.no_stream, api_name="textgen"))
+        gen_events.append(shared.gradio['textbox'].submit(generate_reply, input_params, output_params, show_progress=shared.args.no_stream))
+        gen_events.append(shared.gradio['Continue'].click(generate_reply, [shared.gradio['output_textbox']] + input_params[1:], output_params, show_progress=shared.args.no_stream))
+        shared.gradio["Stop"].click(None, None, None, cancels=gen_events)
 
-interface.queue()
+shared.gradio['interface'].queue()
 if shared.args.listen:
-    interface.launch(prevent_thread_lock=True, share=shared.args.share, server_name="0.0.0.0", server_port=shared.args.listen_port)
+    shared.gradio['interface'].launch(prevent_thread_lock=True, share=shared.args.share, server_name="0.0.0.0", server_port=shared.args.listen_port)
 else:
-    interface.launch(prevent_thread_lock=True, share=shared.args.share, server_port=shared.args.listen_port)
+    shared.gradio['interface'].launch(prevent_thread_lock=True, share=shared.args.share, server_port=shared.args.listen_port)
 
 # I think that I will need this later
 while True:

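The diff cuts off at the final loop, but its purpose follows from the launch calls above: launch(prevent_thread_lock=True) returns immediately instead of blocking, so the script has to keep the main thread alive itself (and the comment suggests the loop body was reserved for future periodic work). A minimal sketch of such a keep-alive loop:

    import time

    # launch(prevent_thread_lock=True) returns at once; without this loop the
    # script would exit and take the Gradio server down with it.
    while True:
        time.sleep(0.5)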