From 3a00cb1bbd0730fca68c0a082792193d52690cfd Mon Sep 17 00:00:00 2001
From: oobabooga <112222186+oobabooga@users.noreply.github.com>
Date: Fri, 13 Jan 2023 14:28:53 -0300
Subject: [PATCH] Reorganize GUI elements

---
 presets/Default.txt                        |  2 +-
 presets/KoboldAI-Good Winds (Skein 6B).txt |  2 +-
 presets/Naive.txt                          |  2 +-
 presets/NovelAI-All-Nighter.txt            |  2 +-
 presets/NovelAI-Luna Moth.txt              |  2 +-
 presets/NovelAI-Mothra (13B).txt           |  2 +-
 presets/NovelAI-Sphinx Moth.txt            |  2 +-
 presets/Pygmalion.txt                      |  2 +-
 presets/Verbose.txt                        |  4 ++--
 server.py                                  | 25 +++++++++++-----------
 10 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/presets/Default.txt b/presets/Default.txt
index 16c6fdba..0a4ec5a0 100644
--- a/presets/Default.txt
+++ b/presets/Default.txt
@@ -1,4 +1,4 @@
 do_sample=True,
-max_new_tokens=max_length,
+max_new_tokens=tokens,
 top_k=100,
 top_p=0.9,
diff --git a/presets/KoboldAI-Good Winds (Skein 6B).txt b/presets/KoboldAI-Good Winds (Skein 6B).txt
index f1e5346e..feae41d1 100644
--- a/presets/KoboldAI-Good Winds (Skein 6B).txt
+++ b/presets/KoboldAI-Good Winds (Skein 6B).txt
@@ -1,5 +1,5 @@
 do_sample=True,
-max_new_tokens=max_length,
+max_new_tokens=tokens,
 top_p=1.0,
 top_k=0,
 temperature=0.7,
diff --git a/presets/Naive.txt b/presets/Naive.txt
index e323a580..1a72bc14 100644
--- a/presets/Naive.txt
+++ b/presets/Naive.txt
@@ -1,5 +1,5 @@
 do_sample=True,
-max_new_tokens=max_length,
+max_new_tokens=tokens,
 top_p=1,
 typical_p=0.3,
 temperature=0.7,
diff --git a/presets/NovelAI-All-Nighter.txt b/presets/NovelAI-All-Nighter.txt
index 5cf9d84e..9ca7245e 100644
--- a/presets/NovelAI-All-Nighter.txt
+++ b/presets/NovelAI-All-Nighter.txt
@@ -1,5 +1,5 @@
 do_sample=True,
-max_new_tokens=max_length,
+max_new_tokens=tokens,
 top_p=1.0,
 top_k=13,
 temperature=1.33,
diff --git a/presets/NovelAI-Luna Moth.txt b/presets/NovelAI-Luna Moth.txt
index c77d7f0d..1a18f583 100644
--- a/presets/NovelAI-Luna Moth.txt
+++ b/presets/NovelAI-Luna Moth.txt
@@ -1,5 +1,5 @@
 do_sample=True,
-max_new_tokens=max_length,
+max_new_tokens=tokens,
 top_p=0.24,
 top_k=85,
 temperature=2.0,
diff --git a/presets/NovelAI-Mothra (13B).txt b/presets/NovelAI-Mothra (13B).txt
index 59822c4e..c95a58d3 100644
--- a/presets/NovelAI-Mothra (13B).txt
+++ b/presets/NovelAI-Mothra (13B).txt
@@ -1,5 +1,5 @@
 do_sample=True,
-max_new_tokens=max_length,
+max_new_tokens=tokens,
 top_p=1.0,
 top_k=100,
 temperature=1.25,
diff --git a/presets/NovelAI-Sphinx Moth.txt b/presets/NovelAI-Sphinx Moth.txt
index f5e7b6e8..def6be06 100644
--- a/presets/NovelAI-Sphinx Moth.txt
+++ b/presets/NovelAI-Sphinx Moth.txt
@@ -1,5 +1,5 @@
 do_sample=True,
-max_new_tokens=max_length,
+max_new_tokens=tokens,
 top_p=0.18,
 top_k=30,
 temperature=2.0,
diff --git a/presets/Pygmalion.txt b/presets/Pygmalion.txt
index ef430985..35053b7e 100644
--- a/presets/Pygmalion.txt
+++ b/presets/Pygmalion.txt
@@ -1,5 +1,5 @@
 do_sample=True,
-max_new_tokens=max_length,
+max_new_tokens=tokens,
 top_p=0.9,
 top_k=0,
 temperature=0.5,
diff --git a/presets/Verbose.txt b/presets/Verbose.txt
index 7321a9c6..d0356819 100644
--- a/presets/Verbose.txt
+++ b/presets/Verbose.txt
@@ -1,6 +1,6 @@
 num_beams=10,
-min_length=max_length,
-max_new_tokens=max_length,
+min_length=tokens,
+max_new_tokens=tokens,
 length_penalty =1.4,
 no_repeat_ngram_size=2,
 early_stopping=True,
diff --git a/server.py b/server.py
index af431801..b9599462 100644
--- a/server.py
+++ b/server.py
@@ -94,7 +94,7 @@ def generate_html(s):
     s = f'<div ...>{s}</div>'
     return s
 
-def generate_reply(question, max_length, inference_settings, selected_model, eos_token=None):
+def generate_reply(question, tokens, inference_settings, selected_model, eos_token=None):
     global model, tokenizer, model_name, loaded_preset, preset
 
     if selected_model != model_name:
@@ -176,19 +176,19 @@ if args.notebook:
             html = gr.HTML()
 
         btn = gr.Button("Generate")
+        length_slider = gr.Slider(minimum=1, maximum=2000, step=1, label='max_new_tokens', value=200)
         with gr.Row():
-            with gr.Column():
-                length_slider = gr.Slider(minimum=1, maximum=2000, step=1, label='max_length', value=200)
             with gr.Column():
                 model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
-                preset_menu = gr.Dropdown(choices=available_presets, value="NovelAI-Sphinx Moth", label='Preset')
+            with gr.Column():
+                preset_menu = gr.Dropdown(choices=available_presets, value="NovelAI-Sphinx Moth", label='Settings preset')
 
         btn.click(generate_reply, [textbox, length_slider, preset_menu, model_menu], [textbox, markdown, html], show_progress=True, api_name="textgen")
         textbox.submit(generate_reply, [textbox, length_slider, preset_menu, model_menu], [textbox, markdown, html], show_progress=True)
 elif args.chat:
     history = []
 
-    def chatbot_wrapper(text, max_length, inference_settings, selected_model, name1, name2, context):
+    def chatbot_wrapper(text, tokens, inference_settings, selected_model, name1, name2, context):
         question = context+'\n\n'
         for i in range(len(history)):
             question += f"{name1}: {history[i][0][3:-5].strip()}\n"
@@ -196,7 +196,7 @@ elif args.chat:
         question += f"{name1}: {text.strip()}\n"
         question += f"{name2}:"
 
-        reply = generate_reply(question, max_length, inference_settings, selected_model, eos_token='\n')[0]
+        reply = generate_reply(question, tokens, inference_settings, selected_model, eos_token='\n')[0]
         reply = reply[len(question):].split('\n')[0].strip()
         history.append((text, reply))
         return history
@@ -218,12 +218,13 @@ elif args.chat:
         gr.Markdown(description)
         with gr.Row():
             with gr.Column():
+                length_slider = gr.Slider(minimum=1, maximum=2000, step=1, label='max_new_tokens', value=200)
                 with gr.Row():
                     with gr.Column():
                         model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
-                        preset_menu = gr.Dropdown(choices=available_presets, value="NovelAI-Sphinx Moth", label='Preset')
                     with gr.Column():
-                        length_slider = gr.Slider(minimum=1, maximum=2000, step=1, label='max_length', value=200)
+                        preset_menu = gr.Dropdown(choices=available_presets, value="NovelAI-Sphinx Moth", label='Settings preset')
+
                 name1 = gr.Textbox(value=name1_str, lines=1, label='Your name')
                 name2 = gr.Textbox(value=name2_str, lines=1, label='Bot\'s name')
                 context = gr.Textbox(value=context_str, lines=2, label='Context')
@@ -241,8 +242,8 @@ elif args.chat:
     btn2.click(lambda x: "", display1, display1)
 
 else:
-    def continue_wrapper(question, max_length, inference_settings, selected_model):
-        a, b, c = generate_reply(question, max_length, inference_settings, selected_model)
+    def continue_wrapper(question, tokens, inference_settings, selected_model):
+        a, b, c = generate_reply(question, tokens, inference_settings, selected_model)
         return a, a, b, c
 
     with gr.Blocks(css=css, analytics_enabled=False) as interface:
@@ -250,8 +251,8 @@ else:
         with gr.Row():
             with gr.Column():
                 textbox = gr.Textbox(value=default_text, lines=15, label='Input')
-                length_slider = gr.Slider(minimum=1, maximum=2000, step=1, label='max_length', value=200)
-                preset_menu = gr.Dropdown(choices=available_presets, value="NovelAI-Sphinx Moth", label='Preset')
+                length_slider = gr.Slider(minimum=1, maximum=2000, step=1, label='max_new_tokens', value=200)
+                preset_menu = gr.Dropdown(choices=available_presets, value="NovelAI-Sphinx Moth", label='Settings preset')
                 model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
                 btn = gr.Button("Generate")
                 cont = gr.Button("Continue")