diff --git a/README.md b/README.md
index 82557f5f..601eb9c7 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@ A gradio webui for running large language models like gpt-j-6B, gpt-neo, opt, ga
 
 Its goal is to become the [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) of text generation.
 
-|![Image1](https://github.com/oobabooga/screenshots/raw/main/qa.png) | ![Image2](https://github.com/oobabooga/screenshots/raw/main/cai.png) |
+|![Image1](https://github.com/oobabooga/screenshots/raw/main/qa.png) | ![Image2](https://github.com/oobabooga/screenshots/raw/main/cai2.png) |
 |:---:|:---:|
 |![Image3](https://github.com/oobabooga/screenshots/raw/main/gpt4chan.png) | ![Image4](https://github.com/oobabooga/screenshots/raw/main/galactica.png) |
 
diff --git a/html_generator.py b/html_generator.py
index 05c63f0e..71eb299e 100644
--- a/html_generator.py
+++ b/html_generator.py
@@ -166,7 +166,7 @@ def generate_chat_html(history, name1, name2):
       margin-left: auto;
       margin-right: auto;
       max-width: 800px;
-      height: 50vh;
+      height: 66.67vh;
       overflow-y: auto;
       padding-right: 20px;
       display: flex;
diff --git a/server.py b/server.py
index 69755d85..8b59b622 100644
--- a/server.py
+++ b/server.py
@@ -185,7 +185,7 @@ else:
     default_text = settings['prompt']
 
 description = f"\n\n# Text generation lab\nGenerate text using Large Language Models.\n"
-css=".my-4 {margin-top: 0} .py-6 {padding-top: 2.5rem}"
+css = ".my-4 {margin-top: 0} .py-6 {padding-top: 2.5rem}"
 
 if args.notebook:
     with gr.Blocks(css=css, analytics_enabled=False) as interface:
@@ -268,35 +268,33 @@ elif args.chat or args.cai_chat:
     name1_str = settings['name1']
     name2_str = settings['name2']
 
-    with gr.Blocks(css=css+".h-\[40vh\] {height: 50vh}", analytics_enabled=False) as interface:
-        gr.Markdown(description)
+    with gr.Blocks(css=css+".h-\[40vh\] {height: 66.67vh} .gradio-container {max-width: 800px; margin-left: auto; margin-right: auto}", analytics_enabled=False) as interface:
+        if args.cai_chat:
+            display1 = gr.HTML(value=generate_chat_html([], "", ""))
+        else:
+            display1 = gr.Chatbot()
+        textbox = gr.Textbox(lines=2, label='Input')
+        btn = gr.Button("Generate")
         with gr.Row():
             with gr.Column():
-                length_slider = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
-                with gr.Row():
-                    with gr.Column():
-                        model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
-                    with gr.Column():
-                        preset_menu = gr.Dropdown(choices=available_presets, value=settings['preset'], label='Settings preset')
-
-                name1 = gr.Textbox(value=name1_str, lines=1, label='Your name')
-                name2 = gr.Textbox(value=name2_str, lines=1, label='Bot\'s name')
-                context = gr.Textbox(value=context_str, lines=2, label='Context')
-                with gr.Row():
-                    check = gr.Checkbox(value=settings['stop_at_newline'], label='Stop generating at new line character?')
-
+                btn3 = gr.Button("Remove last message")
             with gr.Column():
-                if args.cai_chat:
-                    display1 = gr.HTML(value=generate_chat_html([], "", ""))
-                else:
-                    display1 = gr.Chatbot()
-                textbox = gr.Textbox(lines=2, label='Input')
-                btn = gr.Button("Generate")
-                with gr.Row():
-                    with gr.Column():
-                        btn3 = gr.Button("Remove last message")
-                    with gr.Column():
-                        btn2 = gr.Button("Clear history")
+                btn2 = gr.Button("Clear history")
+
+        length_slider = gr.Slider(minimum=settings['max_new_tokens_min'], maximum=settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=settings['max_new_tokens'])
+        with gr.Row():
+            with gr.Column():
+                model_menu = gr.Dropdown(choices=available_models, value=model_name, label='Model')
+            with gr.Column():
+                preset_menu = gr.Dropdown(choices=available_presets, value=settings['preset'], label='Settings preset')
+
+        name1 = gr.Textbox(value=name1_str, lines=1, label='Your name')
+        name2 = gr.Textbox(value=name2_str, lines=1, label='Bot\'s name')
+        context = gr.Textbox(value=context_str, lines=2, label='Context')
+        with gr.Row():
+            check = gr.Checkbox(value=settings['stop_at_newline'], label='Stop generating at new line character?')
+
+
         if args.cai_chat:
             btn.click(cai_chatbot_wrapper, [textbox, length_slider, preset_menu, model_menu, name1, name2, context, check], display1, show_progress=True, api_name="textgen")
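For reference, and not part of the patch, the reorganized chat layout can be sketched in isolation roughly as follows. This is a minimal standalone illustration that assumes a gradio version accepting the (user, bot) tuple history format, substitutes a hypothetical echo_chat function for the real chatbot wrappers, and hard-codes slider bounds in place of the values read from settings.

```python
import gradio as gr

# Placeholder for cai_chatbot_wrapper/chatbot_wrapper from server.py:
# it just echoes the input, appends it to the history, and clears the textbox.
def echo_chat(message, history):
    history = history or []
    history.append((message, f"echo: {message}"))
    return history, ""

css = ".my-4 {margin-top: 0} .py-6 {padding-top: 2.5rem}"

with gr.Blocks(css=css + " .gradio-container {max-width: 800px; margin-left: auto; margin-right: auto}") as demo:
    # Chat display, input box and Generate button sit at the top of the page...
    display1 = gr.Chatbot()
    textbox = gr.Textbox(lines=2, label='Input')
    btn = gr.Button("Generate")
    with gr.Row():
        with gr.Column():
            btn3 = gr.Button("Remove last message")
        with gr.Column():
            btn2 = gr.Button("Clear history")

    # ...while the generation settings follow below, full width (bounds are made up here).
    length_slider = gr.Slider(minimum=1, maximum=2000, step=1, label='max_new_tokens', value=200)

    # Event wiring: Generate appends to the chat, Clear empties it,
    # Remove last message drops the most recent exchange.
    btn.click(echo_chat, [textbox, display1], [display1, textbox])
    btn2.click(lambda: [], None, display1)
    btn3.click(lambda history: (history or [])[:-1], display1, display1)

if __name__ == "__main__":
    demo.launch()
```

The intent of the reorganization shows up directly in this sketch: the chat display, input box and Generate button come first, the per-message buttons sit right under them, and the model and generation controls are pushed below the chat instead of living in a side column.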