diff --git a/README.md b/README.md
index 6694e500..73ae33bd 100644
--- a/README.md
+++ b/README.md
@@ -189,8 +189,6 @@ Optionally, you can use the following command-line flags:
| Flag | Description |
|--------------------------------------------|-------------|
| `-h`, `--help` | Show this help message and exit. |
-| `--notebook` | Launch the web UI in notebook mode, where the output is written to the same text box as the input. |
-| `--chat` | Launch the web UI in chat mode. |
| `--multi-user` | Multi-user mode. Chat histories are not saved or automatically loaded. WARNING: this is highly experimental. |
| `--character CHARACTER` | The name of the character to load in chat mode by default. |
| `--model MODEL` | Name of the model to load by default. |
diff --git a/api-examples/api-example-chat-stream.py b/api-examples/api-example-chat-stream.py
index 055900bd..cccd5b26 100644
--- a/api-examples/api-example-chat-stream.py
+++ b/api-examples/api-example-chat-stream.py
@@ -36,8 +36,6 @@ async def run(user_input, history):
# 'turn_template': 'turn_template', # Optional
'regenerate': False,
'_continue': False,
- 'stop_at_newline': False,
- 'chat_generation_attempts': 1,
'chat_instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>',
# Generation params. If 'preset' is set to different than 'None', the values
diff --git a/api-examples/api-example-chat.py b/api-examples/api-example-chat.py
index c3d0c538..c197a584 100644
--- a/api-examples/api-example-chat.py
+++ b/api-examples/api-example-chat.py
@@ -30,8 +30,6 @@ def run(user_input, history):
# 'turn_template': 'turn_template', # Optional
'regenerate': False,
'_continue': False,
- 'stop_at_newline': False,
- 'chat_generation_attempts': 1,
'chat_instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>',
# Generation params. If 'preset' is set to different than 'None', the values
diff --git a/css/chat.css b/css/chat.css
deleted file mode 100644
index 677d86db..00000000
--- a/css/chat.css
+++ /dev/null
@@ -1,146 +0,0 @@
-.h-\[40vh\], .wrap.svelte-byatnx.svelte-byatnx.svelte-byatnx {
- height: 66.67vh
-}
-
-.gradio-container {
- margin-left: auto !important;
- margin-right: auto !important;
-}
-
-.w-screen {
- width: unset
-}
-
-div.svelte-362y77>*, div.svelte-362y77>.form>* {
- flex-wrap: nowrap
-}
-
-/* fixes the API documentation in chat mode */
-.api-docs.svelte-1iguv9h.svelte-1iguv9h.svelte-1iguv9h {
- display: grid;
-}
-
-.pending.svelte-1ed2p3z {
- opacity: 1;
-}
-
-#extensions {
- padding: 0;
-}
-
-#gradio-chatbot {
- height: 66.67vh;
-}
-
-.wrap.svelte-6roggh.svelte-6roggh {
- max-height: 92.5%;
-}
-
-/* This is for the microphone button in the whisper extension */
-.sm.svelte-1ipelgc {
- width: 100%;
-}
-
-#main button {
- min-width: 0 !important;
-}
-
-#main > :first-child, #extensions {
- max-width: 800px;
- margin-left: auto;
- margin-right: auto;
-}
-
-@media screen and (max-width: 688px) {
- #main {
- padding: 0px;
- }
-
- .chat {
- height: calc(100vh - 274px) !important;
- }
-}
-
-/*****************************************************/
-/*************** Chat box declarations ***************/
-/*****************************************************/
-
-.chat {
- margin-left: auto;
- margin-right: auto;
- max-width: 800px;
- height: calc(100vh - 286px);
- overflow-y: auto;
- padding-right: 20px;
- display: flex;
- flex-direction: column-reverse;
- word-break: break-word;
- overflow-wrap: anywhere;
- padding-top: 1px;
-}
-
-.chat > .messages {
- display: flex;
- flex-direction: column;
-}
-
-.message-body li {
- margin-top: 0.5em !important;
- margin-bottom: 0.5em !important;
-}
-
-.message-body li > p {
- display: inline !important;
-}
-
-.message-body ul, .message-body ol {
- font-size: 15px !important;
-}
-
-.message-body ul {
- list-style-type: disc !important;
-}
-
-.message-body pre {
- margin-bottom: 1.25em !important;
-}
-
-.message-body code {
- white-space: pre-wrap !important;
- word-wrap: break-word !important;
-}
-
-.message-body :not(pre) > code {
- white-space: normal !important;
-}
-
-@media print {
- body {
- visibility: hidden;
- }
-
- .chat {
- visibility: visible;
- position: absolute;
- left: 0;
- top: 0;
- max-width: none;
- max-height: none;
- width: 100%;
- height: fit-content;
- display: flex;
- flex-direction: column-reverse;
- }
-
- .message {
- break-inside: avoid;
- }
-
- .gradio-container {
- overflow: visible;
- }
-
- .tab-nav {
- display: none !important;
- }
-}
diff --git a/css/main.css b/css/main.css
index d37e3f63..5f293921 100644
--- a/css/main.css
+++ b/css/main.css
@@ -45,13 +45,6 @@
min-height: 0
}
-#save_session {
- margin-top: 32px;
-}
-
-#accordion {
-}
-
.dark svg {
fill: white;
}
@@ -64,7 +57,7 @@ ol li p, ul li p {
display: inline-block;
}
-#main, #parameters, #chat-settings, #lora, #training-tab, #model-tab, #session-tab {
+#chat-tab, #default-tab, #notebook-tab, #parameters, #chat-settings, #lora, #training-tab, #model-tab, #session-tab {
border: 0;
}
@@ -78,7 +71,6 @@ ol li p, ul li p {
}
#extensions {
- padding: 15px;
margin-bottom: 35px;
}
@@ -108,7 +100,7 @@ div.svelte-15lo0d8 > *, div.svelte-15lo0d8 > .form > * {
}
.textbox_default textarea {
- height: calc(100vh - 380px);
+ height: calc(100vh - 310px);
}
.textbox_default_output textarea {
@@ -128,6 +120,12 @@ div.svelte-15lo0d8 > *, div.svelte-15lo0d8 > .form > * {
color: #efefef !important;
}
+@media screen and (max-width: 711px) {
+ .textbox_default textarea {
+ height: calc(100vh - 275px);
+ }
+}
+
/* Hide the gradio footer*/
footer {
display: none !important;
@@ -193,3 +191,141 @@ button {
.dark .pretty_scrollbar::-webkit-resizer {
background: #374151;
}
+
+/*****************************************************/
+/*************** Chat UI declarations ****************/
+/*****************************************************/
+
+.h-\[40vh\], .wrap.svelte-byatnx.svelte-byatnx.svelte-byatnx {
+ height: 66.67vh
+}
+
+.gradio-container {
+ margin-left: auto !important;
+ margin-right: auto !important;
+}
+
+.w-screen {
+ width: unset
+}
+
+div.svelte-362y77>*, div.svelte-362y77>.form>* {
+ flex-wrap: nowrap
+}
+
+.pending.svelte-1ed2p3z {
+ opacity: 1;
+}
+
+#gradio-chatbot {
+ height: 66.67vh;
+}
+
+.wrap.svelte-6roggh.svelte-6roggh {
+ max-height: 92.5%;
+}
+
+/* This is for the microphone button in the whisper extension */
+.sm.svelte-1ipelgc {
+ width: 100%;
+}
+
+#chat-tab button, #notebook-tab button, #default-tab button {
+ min-width: 0 !important;
+}
+
+#chat-tab > :first-child, #extensions {
+ max-width: 800px;
+ margin-left: auto;
+ margin-right: auto;
+}
+
+@media screen and (max-width: 688px) {
+ #chat-tab {
+ padding: 0px;
+ }
+
+ .chat {
+ height: calc(100vh - 274px) !important;
+ }
+}
+
+.chat {
+ margin-left: auto;
+ margin-right: auto;
+ max-width: 800px;
+ height: calc(100vh - 286px);
+ overflow-y: auto;
+ padding-right: 20px;
+ display: flex;
+ flex-direction: column-reverse;
+ word-break: break-word;
+ overflow-wrap: anywhere;
+ padding-top: 1px;
+}
+
+.chat > .messages {
+ display: flex;
+ flex-direction: column;
+}
+
+.message-body li {
+ margin-top: 0.5em !important;
+ margin-bottom: 0.5em !important;
+}
+
+.message-body li > p {
+ display: inline !important;
+}
+
+.message-body ul, .message-body ol {
+ font-size: 15px !important;
+}
+
+.message-body ul {
+ list-style-type: disc !important;
+}
+
+.message-body pre {
+ margin-bottom: 1.25em !important;
+}
+
+.message-body code {
+ white-space: pre-wrap !important;
+ word-wrap: break-word !important;
+}
+
+.message-body :not(pre) > code {
+ white-space: normal !important;
+}
+
+@media print {
+ body {
+ visibility: hidden;
+ }
+
+ .chat {
+ visibility: visible;
+ position: absolute;
+ left: 0;
+ top: 0;
+ max-width: none;
+ max-height: none;
+ width: 100%;
+ height: fit-content;
+ display: flex;
+ flex-direction: column-reverse;
+ }
+
+ .message {
+ break-inside: avoid;
+ }
+
+ .gradio-container {
+ overflow: visible;
+ }
+
+ .tab-nav {
+ display: none !important;
+ }
+}
diff --git a/docs/Extensions.md b/docs/Extensions.md
index 4e59e855..53acce59 100644
--- a/docs/Extensions.md
+++ b/docs/Extensions.md
@@ -39,8 +39,8 @@ The extensions framework is based on special functions and variables that you ca
| `def ui()` | Creates custom gradio elements when the UI is launched. |
| `def custom_css()` | Returns custom CSS as a string. It is applied whenever the web UI is loaded. |
| `def custom_js()` | Same as above but for javascript. |
-| `def input_modifier(string, state)` | Modifies the input string before it enters the model. In chat mode, it is applied to the user message. Otherwise, it is applied to the entire prompt. |
-| `def output_modifier(string, state)` | Modifies the output string before it is presented in the UI. In chat mode, it is applied to the bot's reply. Otherwise, it is applied to the entire output. |
+| `def input_modifier(string, state, is_chat=False)` | Modifies the input string before it enters the model. In chat mode, it is applied to the user message. Otherwise, it is applied to the entire prompt. |
+| `def output_modifier(string, state, is_chat=False)` | Modifies the output string before it is presented in the UI. In chat mode, it is applied to the bot's reply. Otherwise, it is applied to the entire output. |
| `def chat_input_modifier(text, visible_text, state)` | Modifies both the visible and internal inputs in chat mode. Can be used to hijack the chat input with custom content. |
| `def bot_prefix_modifier(string, state)` | Applied in chat mode to the prefix for the bot's reply. |
| `def state_modifier(state)` | Modifies the dictionary containing the UI input parameters before it is used by the text generation functions. |
@@ -163,7 +163,7 @@ def chat_input_modifier(text, visible_text, state):
"""
return text, visible_text
-def input_modifier(string, state):
+def input_modifier(string, state, is_chat=False):
"""
In default/notebook modes, modifies the whole prompt.
@@ -196,7 +196,7 @@ def logits_processor_modifier(processor_list, input_ids):
processor_list.append(MyLogits())
return processor_list
-def output_modifier(string, state):
+def output_modifier(string, state, is_chat=False):
"""
Modifies the LLM output before it gets presented.
diff --git a/extensions/api/util.py b/extensions/api/util.py
index 7ebfaa32..0db1c46c 100644
--- a/extensions/api/util.py
+++ b/extensions/api/util.py
@@ -68,8 +68,6 @@ def build_parameters(body, chat=False):
name1, name2, _, greeting, context, _ = load_character_memoized(character, str(body.get('your_name', shared.settings['name1'])), shared.settings['name2'], instruct=False)
name1_instruct, name2_instruct, _, _, context_instruct, turn_template = load_character_memoized(instruction_template, '', '', instruct=True)
generate_params.update({
- 'stop_at_newline': bool(body.get('stop_at_newline', shared.settings['stop_at_newline'])),
- 'chat_generation_attempts': int(body.get('chat_generation_attempts', shared.settings['chat_generation_attempts'])),
'mode': str(body.get('mode', 'chat')),
'name1': str(body.get('name1', name1)),
'name2': str(body.get('name2', name2)),
diff --git a/extensions/elevenlabs_tts/script.py b/extensions/elevenlabs_tts/script.py
index f74e1047..2324d782 100644
--- a/extensions/elevenlabs_tts/script.py
+++ b/extensions/elevenlabs_tts/script.py
@@ -4,9 +4,9 @@ from pathlib import Path
import elevenlabs
import gradio as gr
-from modules import chat, shared
-from modules.utils import gradio
+from modules import chat, shared, ui_chat
from modules.logging_colors import logger
+from modules.utils import gradio
params = {
'activate': True,
@@ -167,24 +167,23 @@ def ui():
convert_cancel = gr.Button('Cancel', visible=False)
convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False)
- if shared.is_chat():
- # Convert history with confirmation
- convert_arr = [convert_confirm, convert, convert_cancel]
- convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
- convert_confirm.click(
- lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(
- remove_tts_from_history, gradio('history'), gradio('history')).then(
- chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
- chat.redraw_html, shared.reload_inputs, gradio('display'))
+ # Convert history with confirmation
+ convert_arr = [convert_confirm, convert, convert_cancel]
+ convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
+ convert_confirm.click(
+ lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(
+ remove_tts_from_history, gradio('history'), gradio('history')).then(
+ chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
+ chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))
- convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
+ convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
- # Toggle message text in history
- show_text.change(
- lambda x: params.update({"show_text": x}), show_text, None).then(
- toggle_text_in_history, gradio('history'), gradio('history')).then(
- chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
- chat.redraw_html, shared.reload_inputs, gradio('display'))
+ # Toggle message text in history
+ show_text.change(
+ lambda x: params.update({"show_text": x}), show_text, None).then(
+ toggle_text_in_history, gradio('history'), gradio('history')).then(
+ chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
+ chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))
# Event functions to update the parameters in the backend
activate.change(lambda x: params.update({'activate': x}), activate, None)
diff --git a/extensions/example/script.py b/extensions/example/script.py
index b4db7102..44f0cb3c 100644
--- a/extensions/example/script.py
+++ b/extensions/example/script.py
@@ -59,7 +59,7 @@ def chat_input_modifier(text, visible_text, state):
"""
return text, visible_text
-def input_modifier(string, state):
+def input_modifier(string, state, is_chat=False):
"""
In default/notebook modes, modifies the whole prompt.
@@ -92,7 +92,7 @@ def logits_processor_modifier(processor_list, input_ids):
processor_list.append(MyLogits())
return processor_list
-def output_modifier(string, state):
+def output_modifier(string, state, is_chat=False):
"""
Modifies the LLM output before it gets presented.
diff --git a/extensions/gallery/script.js b/extensions/gallery/script.js
new file mode 100644
index 00000000..878401ec
--- /dev/null
+++ b/extensions/gallery/script.js
@@ -0,0 +1,14 @@
+let gallery_element = document.getElementById('gallery-extension');
+
+main_parent.addEventListener('click', function(e) {
+ let chat_visible = (chat_tab.offsetHeight > 0 && chat_tab.offsetWidth > 0);
+ let notebook_visible = (notebook_tab.offsetHeight > 0 && notebook_tab.offsetWidth > 0);
+ let default_visible = (default_tab.offsetHeight > 0 && default_tab.offsetWidth > 0);
+
+ // Only show this extension in the Chat tab
+ if (chat_visible) {
+ gallery_element.style.display = 'flex';
+ } else {
+ gallery_element.style.display = 'none';
+ }
+});
diff --git a/extensions/gallery/script.py b/extensions/gallery/script.py
index 993ef273..611a11f4 100644
--- a/extensions/gallery/script.py
+++ b/extensions/gallery/script.py
@@ -82,8 +82,13 @@ def select_character(evt: gr.SelectData):
return (evt.value[1])
+def custom_js():
+ path_to_js = Path(__file__).parent.resolve() / 'script.js'
+    return path_to_js.read_text()
+
+
def ui():
- with gr.Accordion("Character gallery", open=False):
+ with gr.Accordion("Character gallery", open=False, elem_id='gallery-extension'):
update = gr.Button("Refresh")
gr.HTML(value="")
gallery = gr.Dataset(components=[gr.HTML(visible=False)],
diff --git a/extensions/send_pictures/script.py b/extensions/send_pictures/script.py
index 39c9362a..f8e6c969 100644
--- a/extensions/send_pictures/script.py
+++ b/extensions/send_pictures/script.py
@@ -5,7 +5,7 @@ import gradio as gr
import torch
from transformers import BlipForConditionalGeneration, BlipProcessor
-from modules import chat, shared
+from modules import chat, shared, ui_chat
from modules.ui import gather_interface_values
from modules.utils import gradio
@@ -54,5 +54,5 @@ def ui():
"value": generate_chat_picture(picture, name1, name2)
}), [picture_select, shared.gradio['name1'], shared.gradio['name2']], None).then(
gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- chat.generate_chat_reply_wrapper, shared.input_params, gradio('display', 'history'), show_progress=False).then(
+ chat.generate_chat_reply_wrapper, gradio(ui_chat.inputs), gradio('display', 'history'), show_progress=False).then(
lambda: None, None, picture_select, show_progress=False)
diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py
index b96a47fd..707d919b 100644
--- a/extensions/silero_tts/script.py
+++ b/extensions/silero_tts/script.py
@@ -6,7 +6,7 @@ import gradio as gr
import torch
from extensions.silero_tts import tts_preprocessor
-from modules import chat, shared
+from modules import chat, shared, ui_chat
from modules.utils import gradio
torch._C._jit_set_profiling_mode(False)
@@ -194,24 +194,23 @@ def ui():
convert_cancel = gr.Button('Cancel', visible=False)
convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False)
- if shared.is_chat():
- # Convert history with confirmation
- convert_arr = [convert_confirm, convert, convert_cancel]
- convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
- convert_confirm.click(
- lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(
- remove_tts_from_history, gradio('history'), gradio('history')).then(
- chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
- chat.redraw_html, shared.reload_inputs, gradio('display'))
+ # Convert history with confirmation
+ convert_arr = [convert_confirm, convert, convert_cancel]
+ convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)
+ convert_confirm.click(
+ lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr).then(
+ remove_tts_from_history, gradio('history'), gradio('history')).then(
+ chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
+ chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))
- convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
+ convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)
- # Toggle message text in history
- show_text.change(
- lambda x: params.update({"show_text": x}), show_text, None).then(
- toggle_text_in_history, gradio('history'), gradio('history')).then(
- chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
- chat.redraw_html, shared.reload_inputs, gradio('display'))
+ # Toggle message text in history
+ show_text.change(
+ lambda x: params.update({"show_text": x}), show_text, None).then(
+ toggle_text_in_history, gradio('history'), gradio('history')).then(
+ chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
+ chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))
# Event functions to update the parameters in the backend
activate.change(lambda x: params.update({"activate": x}), activate, None)
diff --git a/extensions/superbooga/script.py b/extensions/superbooga/script.py
index 475cf1e0..06fe8ad3 100644
--- a/extensions/superbooga/script.py
+++ b/extensions/superbooga/script.py
@@ -4,7 +4,7 @@ import textwrap
import gradio as gr
from bs4 import BeautifulSoup
-from modules import chat, shared
+from modules import chat
from modules.logging_colors import logger
from .chromadb import add_chunks_to_collector, make_collector
@@ -143,8 +143,8 @@ def remove_special_tokens(string):
return re.sub(pattern, '', string)
-def input_modifier(string):
- if shared.is_chat():
+def input_modifier(string, state, is_chat=False):
+ if is_chat:
return string
# Find the user input
diff --git a/js/main.js b/js/main.js
index 7a2368fe..40197869 100644
--- a/js/main.js
+++ b/js/main.js
@@ -1,17 +1,30 @@
-document.getElementById("main").parentNode.childNodes[0].classList.add("header_bar");
-document.getElementById("main").parentNode.style = "padding: 0; margin: 0";
-document.getElementById("main").parentNode.parentNode.parentNode.style = "padding: 0";
+let chat_tab = document.getElementById('chat-tab');
+let notebook_tab = document.getElementById('notebook-tab');
+let default_tab = document.getElementById('default-tab');
-// Get references to the elements
-let main = document.getElementById('main');
-let main_parent = main.parentNode;
+let main_parent = chat_tab.parentNode;
let extensions = document.getElementById('extensions');
-// Add an event listener to the main element
+main_parent.childNodes[0].classList.add("header_bar");
+main_parent.style = "padding: 0; margin: 0";
+main_parent.parentNode.parentNode.style = "padding: 0";
+
+// Add an event listener to the generation tabs
main_parent.addEventListener('click', function(e) {
- // Check if the main element is visible
- if (main.offsetHeight > 0 && main.offsetWidth > 0) {
+ let chat_visible = (chat_tab.offsetHeight > 0 && chat_tab.offsetWidth > 0);
+ let notebook_visible = (notebook_tab.offsetHeight > 0 && notebook_tab.offsetWidth > 0);
+ let default_visible = (default_tab.offsetHeight > 0 && default_tab.offsetWidth > 0);
+
+ // Check if one of the generation tabs is visible
+ if (chat_visible || notebook_visible || default_visible) {
extensions.style.display = 'flex';
+ if (chat_visible) {
+ extensions.style.maxWidth = "800px";
+ extensions.style.padding = "0px";
+ } else {
+ extensions.style.maxWidth = "none";
+ extensions.style.padding = "15px";
+ }
} else {
extensions.style.display = 'none';
}
diff --git a/js/save_files.js b/js/save_files.js
index 7dfbcfda..d5b22c4b 100644
--- a/js/save_files.js
+++ b/js/save_files.js
@@ -32,9 +32,9 @@ function saveHistory(history, character, mode) {
saveFile(history, path);
}
-function saveSession(session, mode) {
+function saveSession(session) {
let path = null;
- path = `session_${mode}_${getCurrentTimestamp()}.json`;
+ path = `session_${getCurrentTimestamp()}.json`;
saveFile(session, path);
}
diff --git a/modules/chat.py b/modules/chat.py
index c2a05d3f..e2bba18f 100644
--- a/modules/chat.py
+++ b/modules/chat.py
@@ -175,9 +175,6 @@ def get_stopping_strings(state):
f"\n{state['name2']}:"
]
- if state['stop_at_newline']:
- stopping_strings.append("\n")
-
return stopping_strings
@@ -201,7 +198,7 @@ def chatbot_wrapper(text, state, regenerate=False, _continue=False, loading_mess
if not any((regenerate, _continue)):
visible_text = text
text, visible_text = apply_extensions('chat_input', text, visible_text, state)
- text = apply_extensions('input', text, state)
+ text = apply_extensions('input', text, state, is_chat=True)
# *Is typing...*
if loading_message:
@@ -230,45 +227,37 @@ def chatbot_wrapper(text, state, regenerate=False, _continue=False, loading_mess
prompt = generate_chat_prompt(text, state, **kwargs)
# Generate
- cumulative_reply = ''
- for i in range(state['chat_generation_attempts']):
- reply = None
- for j, reply in enumerate(generate_reply(prompt + cumulative_reply, state, stopping_strings=stopping_strings, is_chat=True)):
- reply = cumulative_reply + reply
+ reply = None
+ for j, reply in enumerate(generate_reply(prompt, state, stopping_strings=stopping_strings, is_chat=True)):
- # Extract the reply
-            visible_reply = re.sub("(<USER>|<user>|{{user}})", state['name1'], reply)
+ # Extract the reply
+        visible_reply = re.sub("(<USER>|<user>|{{user}})", state['name1'], reply)
- # We need this global variable to handle the Stop event,
- # otherwise gradio gets confused
- if shared.stop_everything:
- output['visible'][-1][1] = apply_extensions('output', output['visible'][-1][1], state)
+ # We need this global variable to handle the Stop event,
+ # otherwise gradio gets confused
+ if shared.stop_everything:
+ output['visible'][-1][1] = apply_extensions('output', output['visible'][-1][1], state, is_chat=True)
+ yield output
+ return
+
+ if just_started:
+ just_started = False
+ if not _continue:
+ output['internal'].append(['', ''])
+ output['visible'].append(['', ''])
+
+ if _continue:
+ output['internal'][-1] = [text, last_reply[0] + reply]
+ output['visible'][-1] = [visible_text, last_reply[1] + visible_reply]
+ if is_stream:
+ yield output
+ elif not (j == 0 and visible_reply.strip() == ''):
+ output['internal'][-1] = [text, reply.lstrip(' ')]
+ output['visible'][-1] = [visible_text, visible_reply.lstrip(' ')]
+ if is_stream:
yield output
- return
- if just_started:
- just_started = False
- if not _continue:
- output['internal'].append(['', ''])
- output['visible'].append(['', ''])
-
- if _continue:
- output['internal'][-1] = [text, last_reply[0] + reply]
- output['visible'][-1] = [visible_text, last_reply[1] + visible_reply]
- if is_stream:
- yield output
- elif not (j == 0 and visible_reply.strip() == ''):
- output['internal'][-1] = [text, reply.lstrip(' ')]
- output['visible'][-1] = [visible_text, visible_reply.lstrip(' ')]
- if is_stream:
- yield output
-
- if reply in [None, cumulative_reply]:
- break
- else:
- cumulative_reply = reply
-
- output['visible'][-1][1] = apply_extensions('output', output['visible'][-1][1], state)
+ output['visible'][-1][1] = apply_extensions('output', output['visible'][-1][1], state, is_chat=True)
yield output
@@ -278,27 +267,15 @@ def impersonate_wrapper(text, start_with, state):
yield ''
return
- # Defining some variables
- cumulative_reply = ''
prompt = generate_chat_prompt('', state, impersonate=True)
stopping_strings = get_stopping_strings(state)
yield text + '...'
- cumulative_reply = text
- for i in range(state['chat_generation_attempts']):
- reply = None
- for reply in generate_reply(prompt + cumulative_reply, state, stopping_strings=stopping_strings, is_chat=True):
- reply = cumulative_reply + reply
- yield reply.lstrip(' ')
- if shared.stop_everything:
- return
-
- if reply in [None, cumulative_reply]:
- break
- else:
- cumulative_reply = reply
-
- yield cumulative_reply.lstrip(' ')
+ reply = None
+ for reply in generate_reply(prompt, state, stopping_strings=stopping_strings, is_chat=True):
+ yield reply.lstrip(' ')
+ if shared.stop_everything:
+ return
def generate_chat_reply(text, state, regenerate=False, _continue=False, loading_message=True):
@@ -352,7 +329,7 @@ def replace_last_reply(text, state):
return history
elif len(history['visible']) > 0:
history['visible'][-1][1] = text
- history['internal'][-1][1] = apply_extensions('input', text, state)
+ history['internal'][-1][1] = apply_extensions('input', text, state, is_chat=True)
return history
@@ -360,7 +337,7 @@ def replace_last_reply(text, state):
def send_dummy_message(text, state):
history = state['history']
history['visible'].append([text, ''])
- history['internal'].append([apply_extensions('input', text, state), ''])
+ history['internal'].append([apply_extensions('input', text, state, is_chat=True), ''])
return history
@@ -371,7 +348,7 @@ def send_dummy_reply(text, state):
history['internal'].append(['', ''])
history['visible'][-1][1] = text
- history['internal'][-1][1] = apply_extensions('input', text, state)
+ history['internal'][-1][1] = apply_extensions('input', text, state, is_chat=True)
return history
@@ -385,7 +362,7 @@ def clear_chat_log(state):
if mode != 'instruct':
if greeting != '':
history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]]
- history['visible'] += [['', apply_extensions('output', greeting, state)]]
+ history['visible'] += [['', apply_extensions('output', greeting, state, is_chat=True)]]
return history
@@ -452,7 +429,7 @@ def load_persistent_history(state):
history = {'internal': [], 'visible': []}
if greeting != "":
history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]]
- history['visible'] += [['', apply_extensions('output', greeting, state)]]
+ history['visible'] += [['', apply_extensions('output', greeting, state, is_chat=True)]]
return history
diff --git a/modules/extensions.py b/modules/extensions.py
index 76b6be8b..796ff072 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -53,14 +53,32 @@ def iterator():
# Extension functions that map string -> string
-def _apply_string_extensions(function_name, text, state):
+def _apply_string_extensions(function_name, text, state, is_chat=False):
for extension, _ in iterator():
if hasattr(extension, function_name):
func = getattr(extension, function_name)
- if len(signature(func).parameters) == 2:
- text = func(text, state)
+
+ # Handle old extensions without the 'state' arg or
+ # the 'is_chat' kwarg
+ count = 0
+ has_chat = False
+ for k in signature(func).parameters:
+ if k == 'is_chat':
+ has_chat = True
+ else:
+ count += 1
+
+ if count == 2:
+ args = [text, state]
else:
- text = func(text)
+ args = [text]
+
+ if has_chat:
+ kwargs = {'is_chat': is_chat}
+ else:
+ kwargs = {}
+
+ text = func(*args, **kwargs)
return text
@@ -169,9 +187,7 @@ def create_extensions_block():
if len(to_display) > 0:
with gr.Column(elem_id="extensions"):
for row in to_display:
- extension, name = row
- display_name = getattr(extension, 'params', {}).get('display_name', name)
- gr.Markdown(f"\n### {display_name}")
+ extension, _ = row
extension.ui()
diff --git a/modules/shared.py b/modules/shared.py
index cb6f0ae1..89b5f0cb 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -19,8 +19,6 @@ lora_names = []
stop_everything = False
generation_lock = None
processing_message = '*Is typing...*'
-input_params = []
-reload_inputs = []
# UI variables
gradio = {}
@@ -45,7 +43,6 @@ settings = {
'greeting': '',
'turn_template': '',
'custom_stopping_strings': '',
- 'stop_at_newline': False,
'add_bos_token': True,
'ban_eos_token': False,
'skip_special_tokens': True,
@@ -57,11 +54,7 @@ settings = {
'chat_style': 'TheEncrypted777',
'instruction_template': 'None',
'chat-instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>',
- 'chat_generation_attempts': 1,
- 'chat_generation_attempts_min': 1,
- 'chat_generation_attempts_max': 10,
- 'default_extensions': [],
- 'chat_default_extensions': ['gallery'],
+ 'default_extensions': ['gallery'],
'preset': 'simple-1',
'prompt': 'QA',
}
@@ -81,8 +74,8 @@ def str2bool(v):
parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=54))
# Basic settings
-parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
-parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode with a style similar to the Character.AI website.')
+parser.add_argument('--notebook', action='store_true', help='DEPRECATED')
+parser.add_argument('--chat', action='store_true', help='DEPRECATED')
parser.add_argument('--multi-user', action='store_true', help='Multi-user mode. Chat histories are not saved or automatically loaded. WARNING: this is highly experimental.')
parser.add_argument('--character', type=str, help='The name of the character to load in chat mode by default.')
parser.add_argument('--model', type=str, help='Name of the model to load by default.')
@@ -187,6 +180,11 @@ parser.add_argument('--multimodal-pipeline', type=str, default=None, help='The m
args = parser.parse_args()
args_defaults = parser.parse_args([])
+# Deprecation warnings
+for k in ['chat', 'notebook']:
+ if getattr(args, k):
+ logger.warning(f'--{k} has been deprecated and will be removed soon. Please remove that flag.')
+
# Security warnings
if args.trust_remote_code:
logger.warning("trust_remote_code is enabled. This is dangerous.")
@@ -227,16 +225,7 @@ def add_extension(name):
def is_chat():
- return args.chat
-
-
-def get_mode():
- if args.chat:
- return 'chat'
- elif args.notebook:
- return 'notebook'
- else:
- return 'default'
+ return True
args.loader = fix_loader_name(args.loader)
diff --git a/modules/ui.py b/modules/ui.py
index b58b7dd6..e7817f73 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -8,10 +8,8 @@ from modules import shared
with open(Path(__file__).resolve().parent / '../css/main.css', 'r') as f:
css = f.read()
-with open(Path(__file__).resolve().parent / '../css/chat.css', 'r') as f:
- chat_css = f.read()
with open(Path(__file__).resolve().parent / '../js/main.js', 'r') as f:
- main_js = f.read()
+ js = f.read()
with open(Path(__file__).resolve().parent / '../js/save_files.js', 'r') as f:
save_files_js = f.read()
@@ -116,31 +114,35 @@ def list_interface_input_elements():
'top_a',
]
- if shared.args.chat:
- elements += [
- 'character_menu',
- 'history',
- 'name1',
- 'name2',
- 'greeting',
- 'context',
- 'chat_generation_attempts',
- 'stop_at_newline',
- 'mode',
- 'instruction_template',
- 'name1_instruct',
- 'name2_instruct',
- 'context_instruct',
- 'turn_template',
- 'chat_style',
- 'chat-instruct_command',
- ]
- else:
- elements.append('textbox')
- if not shared.args.notebook:
- elements.append('output_textbox')
+ # Chat elements
+ elements += [
+ 'textbox',
+ 'character_menu',
+ 'history',
+ 'name1',
+ 'name2',
+ 'greeting',
+ 'context',
+ 'mode',
+ 'instruction_template',
+ 'name1_instruct',
+ 'name2_instruct',
+ 'context_instruct',
+ 'turn_template',
+ 'chat_style',
+ 'chat-instruct_command',
+ ]
+ # Notebook/default elements
+ elements += [
+ 'textbox-notebook',
+ 'textbox-default',
+ 'output_textbox'
+ ]
+
+ # Model elements
elements += list_model_elements()
+
return elements
diff --git a/modules/ui_chat.py b/modules/ui_chat.py
index 1d73adf7..76e70ed0 100644
--- a/modules/ui_chat.py
+++ b/modules/ui_chat.py
@@ -10,14 +10,17 @@ from modules.html_generator import chat_html_wrapper
from modules.text_generation import stop_everything_event
from modules.utils import gradio
+inputs = ('Chat input', 'start_with', 'interface_state')
+reload_arr = ('history', 'name1', 'name2', 'mode', 'chat_style')
+clear_arr = ('Clear history-confirm', 'Clear history', 'Clear history-cancel')
+
def create_ui():
- shared.gradio['interface_state'] = gr.State({k: None for k in shared.input_elements})
shared.gradio['Chat input'] = gr.State()
shared.gradio['dummy'] = gr.State()
shared.gradio['history'] = gr.State({'internal': [], 'visible': []})
- with gr.Tab('Text generation', elem_id='main'):
+ with gr.Tab('Chat', elem_id='chat-tab'):
shared.gradio['display'] = gr.HTML(value=chat_html_wrapper({'internal': [], 'visible': []}, shared.settings['name1'], shared.settings['name2'], 'chat', 'cai-chat'))
shared.gradio['textbox'] = gr.Textbox(label='Input')
with gr.Row():
@@ -45,82 +48,80 @@ def create_ui():
shared.gradio['start_with'] = gr.Textbox(label='Start reply with', placeholder='Sure thing!', value=shared.settings['start_with'])
with gr.Row():
- shared.gradio['mode'] = gr.Radio(choices=['chat', 'chat-instruct', 'instruct'], value=shared.settings['mode'] if shared.settings['mode'] in ['chat', 'instruct', 'chat-instruct'] else 'chat', label='Mode', info='Defines how the chat prompt is generated. In instruct and chat-instruct modes, the instruction template selected under "Chat settings" must match the current model.')
+ shared.gradio['mode'] = gr.Radio(choices=['chat', 'chat-instruct', 'instruct'], value=shared.settings['mode'] if shared.settings['mode'] in ['chat', 'instruct', 'chat-instruct'] else 'chat', label='Mode', info='Defines how the chat prompt is generated. In instruct and chat-instruct modes, the instruction template selected under Parameters > Instruction template must match the current model.')
shared.gradio['chat_style'] = gr.Dropdown(choices=utils.get_available_chat_styles(), label='Chat style', value=shared.settings['chat_style'], visible=shared.settings['mode'] != 'instruct')
- with gr.Tab('Chat settings', elem_id='chat-settings'):
- with gr.Tab('Character'):
- with gr.Row():
- with gr.Column(scale=8):
- with gr.Row():
- shared.gradio['character_menu'] = gr.Dropdown(value='None', choices=utils.get_available_characters(), label='Character', elem_id='character-menu', info='Used in chat and chat-instruct modes.', elem_classes='slim-dropdown')
- ui.create_refresh_button(shared.gradio['character_menu'], lambda: None, lambda: {'choices': utils.get_available_characters()}, 'refresh-button')
- shared.gradio['save_character'] = gr.Button('💾', elem_classes='refresh-button')
- shared.gradio['delete_character'] = gr.Button('🗑️', elem_classes='refresh-button')
- shared.gradio['name1'] = gr.Textbox(value=shared.settings['name1'], lines=1, label='Your name')
- shared.gradio['name2'] = gr.Textbox(value=shared.settings['name2'], lines=1, label='Character\'s name')
- shared.gradio['context'] = gr.Textbox(value=shared.settings['context'], lines=10, label='Context', elem_classes=['add_scrollbar'])
- shared.gradio['greeting'] = gr.Textbox(value=shared.settings['greeting'], lines=5, label='Greeting', elem_classes=['add_scrollbar'])
-
- with gr.Column(scale=1):
- shared.gradio['character_picture'] = gr.Image(label='Character picture', type='pil')
- shared.gradio['your_picture'] = gr.Image(label='Your picture', type='pil', value=Image.open(Path('cache/pfp_me.png')) if Path('cache/pfp_me.png').exists() else None)
-
- with gr.Tab('Instruction template'):
- with gr.Row():
+def create_chat_settings_ui():
+ with gr.Tab('Character'):
+ with gr.Row():
+ with gr.Column(scale=8):
with gr.Row():
- shared.gradio['instruction_template'] = gr.Dropdown(choices=utils.get_available_instruction_templates(), label='Instruction template', value='None', info='Change this according to the model/LoRA that you are using. Used in instruct and chat-instruct modes.', elem_classes='slim-dropdown')
- ui.create_refresh_button(shared.gradio['instruction_template'], lambda: None, lambda: {'choices': utils.get_available_instruction_templates()}, 'refresh-button')
- shared.gradio['save_template'] = gr.Button('💾', elem_classes='refresh-button')
- shared.gradio['delete_template'] = gr.Button('🗑️ ', elem_classes='refresh-button')
+ shared.gradio['character_menu'] = gr.Dropdown(value='None', choices=utils.get_available_characters(), label='Character', elem_id='character-menu', info='Used in chat and chat-instruct modes.', elem_classes='slim-dropdown')
+ ui.create_refresh_button(shared.gradio['character_menu'], lambda: None, lambda: {'choices': utils.get_available_characters()}, 'refresh-button')
+ shared.gradio['save_character'] = gr.Button('💾', elem_classes='refresh-button')
+ shared.gradio['delete_character'] = gr.Button('🗑️', elem_classes='refresh-button')
- shared.gradio['name1_instruct'] = gr.Textbox(value='', lines=2, label='User string')
- shared.gradio['name2_instruct'] = gr.Textbox(value='', lines=1, label='Bot string')
- shared.gradio['context_instruct'] = gr.Textbox(value='', lines=4, label='Context')
- shared.gradio['turn_template'] = gr.Textbox(value=shared.settings['turn_template'], lines=1, label='Turn template', info='Used to precisely define the placement of spaces and new line characters in instruction prompts.')
+ shared.gradio['name1'] = gr.Textbox(value=shared.settings['name1'], lines=1, label='Your name')
+ shared.gradio['name2'] = gr.Textbox(value=shared.settings['name2'], lines=1, label='Character\'s name')
+ shared.gradio['context'] = gr.Textbox(value=shared.settings['context'], lines=10, label='Context', elem_classes=['add_scrollbar'])
+ shared.gradio['greeting'] = gr.Textbox(value=shared.settings['greeting'], lines=5, label='Greeting', elem_classes=['add_scrollbar'])
+
+ with gr.Column(scale=1):
+ shared.gradio['character_picture'] = gr.Image(label='Character picture', type='pil')
+ shared.gradio['your_picture'] = gr.Image(label='Your picture', type='pil', value=Image.open(Path('cache/pfp_me.png')) if Path('cache/pfp_me.png').exists() else None)
+
+ with gr.Tab('Instruction template'):
+ with gr.Row():
with gr.Row():
- shared.gradio['chat-instruct_command'] = gr.Textbox(value=shared.settings['chat-instruct_command'], lines=4, label='Command for chat-instruct mode', info='<|character|> gets replaced by the bot name, and <|prompt|> gets replaced by the regular chat prompt.', elem_classes=['add_scrollbar'])
+ shared.gradio['instruction_template'] = gr.Dropdown(choices=utils.get_available_instruction_templates(), label='Instruction template', value='None', info='Change this according to the model/LoRA that you are using. Used in instruct and chat-instruct modes.', elem_classes='slim-dropdown')
+ ui.create_refresh_button(shared.gradio['instruction_template'], lambda: None, lambda: {'choices': utils.get_available_instruction_templates()}, 'refresh-button')
+ shared.gradio['save_template'] = gr.Button('💾', elem_classes='refresh-button')
+ shared.gradio['delete_template'] = gr.Button('🗑️ ', elem_classes='refresh-button')
- with gr.Tab('Chat history'):
+ shared.gradio['name1_instruct'] = gr.Textbox(value='', lines=2, label='User string')
+ shared.gradio['name2_instruct'] = gr.Textbox(value='', lines=1, label='Bot string')
+ shared.gradio['context_instruct'] = gr.Textbox(value='', lines=4, label='Context')
+ shared.gradio['turn_template'] = gr.Textbox(value=shared.settings['turn_template'], lines=1, label='Turn template', info='Used to precisely define the placement of spaces and new line characters in instruction prompts.')
+ with gr.Row():
+ shared.gradio['chat-instruct_command'] = gr.Textbox(value=shared.settings['chat-instruct_command'], lines=4, label='Command for chat-instruct mode', info='<|character|> gets replaced by the bot name, and <|prompt|> gets replaced by the regular chat prompt.', elem_classes=['add_scrollbar'])
+
+ with gr.Tab('Chat history'):
+ with gr.Row():
+ with gr.Column():
+ shared.gradio['save_chat_history'] = gr.Button(value='Save history')
+
+ with gr.Column():
+ shared.gradio['load_chat_history'] = gr.File(type='binary', file_types=['.json', '.txt'], label='Upload History JSON')
+
+ with gr.Tab('Upload character'):
+ with gr.Tab('YAML or JSON'):
+ with gr.Row():
+ shared.gradio['upload_json'] = gr.File(type='binary', file_types=['.json', '.yaml'], label='JSON or YAML File')
+ shared.gradio['upload_img_bot'] = gr.Image(type='pil', label='Profile Picture (optional)')
+
+ shared.gradio['Submit character'] = gr.Button(value='Submit', interactive=False)
+
+ with gr.Tab('TavernAI PNG'):
with gr.Row():
with gr.Column():
- shared.gradio['save_chat_history'] = gr.Button(value='Save history')
-
+ shared.gradio['upload_img_tavern'] = gr.Image(type='pil', label='TavernAI PNG File', elem_id='upload_img_tavern')
+ shared.gradio['tavern_json'] = gr.State()
with gr.Column():
- shared.gradio['load_chat_history'] = gr.File(type='binary', file_types=['.json', '.txt'], label='Upload History JSON')
+ shared.gradio['tavern_name'] = gr.Textbox(value='', lines=1, label='Name', interactive=False)
+ shared.gradio['tavern_desc'] = gr.Textbox(value='', lines=4, max_lines=4, label='Description', interactive=False)
- with gr.Tab('Upload character'):
- with gr.Tab('YAML or JSON'):
- with gr.Row():
- shared.gradio['upload_json'] = gr.File(type='binary', file_types=['.json', '.yaml'], label='JSON or YAML File')
- shared.gradio['upload_img_bot'] = gr.Image(type='pil', label='Profile Picture (optional)')
-
- shared.gradio['Submit character'] = gr.Button(value='Submit', interactive=False)
-
- with gr.Tab('TavernAI PNG'):
- with gr.Row():
- with gr.Column():
- shared.gradio['upload_img_tavern'] = gr.Image(type='pil', label='TavernAI PNG File', elem_id='upload_img_tavern')
- shared.gradio['tavern_json'] = gr.State()
- with gr.Column():
- shared.gradio['tavern_name'] = gr.Textbox(value='', lines=1, label='Name', interactive=False)
- shared.gradio['tavern_desc'] = gr.Textbox(value='', lines=4, max_lines=4, label='Description', interactive=False)
-
- shared.gradio['Submit tavern character'] = gr.Button(value='Submit', interactive=False)
+ shared.gradio['Submit tavern character'] = gr.Button(value='Submit', interactive=False)
def create_event_handlers():
gen_events = []
-
- shared.input_params = gradio('Chat input', 'start_with', 'interface_state')
- clear_arr = gradio('Clear history-confirm', 'Clear history', 'Clear history-cancel')
- shared.reload_inputs = gradio('history', 'name1', 'name2', 'mode', 'chat_style')
+ shared.input_params = gradio(inputs) # Obsolete, kept for compatibility with old extensions
gen_events.append(shared.gradio['Generate'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda x: (x, ''), gradio('textbox'), gradio('Chat input', 'textbox'), show_progress=False).then(
- chat.generate_chat_reply_wrapper, shared.input_params, gradio('display', 'history'), show_progress=False).then(
+ chat.generate_chat_reply_wrapper, gradio(inputs), gradio('display', 'history'), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
@@ -129,7 +130,7 @@ def create_event_handlers():
gen_events.append(shared.gradio['textbox'].submit(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda x: (x, ''), gradio('textbox'), gradio('Chat input', 'textbox'), show_progress=False).then(
- chat.generate_chat_reply_wrapper, shared.input_params, gradio('display', 'history'), show_progress=False).then(
+ chat.generate_chat_reply_wrapper, gradio(inputs), gradio('display', 'history'), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
@@ -137,7 +138,7 @@ def create_event_handlers():
gen_events.append(shared.gradio['Regenerate'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- partial(chat.generate_chat_reply_wrapper, regenerate=True), shared.input_params, gradio('display', 'history'), show_progress=False).then(
+ partial(chat.generate_chat_reply_wrapper, regenerate=True), gradio(inputs), gradio('display', 'history'), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
@@ -145,7 +146,7 @@ def create_event_handlers():
gen_events.append(shared.gradio['Continue'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- partial(chat.generate_chat_reply_wrapper, _continue=True), shared.input_params, gradio('display', 'history'), show_progress=False).then(
+ partial(chat.generate_chat_reply_wrapper, _continue=True), gradio(inputs), gradio('display', 'history'), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None).then(
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
@@ -154,7 +155,7 @@ def create_event_handlers():
gen_events.append(shared.gradio['Impersonate'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda x: x, gradio('textbox'), gradio('Chat input'), show_progress=False).then(
- chat.impersonate_wrapper, shared.input_params, gradio('textbox'), show_progress=False).then(
+ chat.impersonate_wrapper, gradio(inputs), gradio('textbox'), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
)
@@ -163,59 +164,59 @@ def create_event_handlers():
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
chat.replace_last_reply, gradio('textbox', 'interface_state'), gradio('history')).then(
lambda: '', None, gradio('textbox'), show_progress=False).then(
- chat.redraw_html, shared.reload_inputs, gradio('display')).then(
+ chat.redraw_html, gradio(reload_arr), gradio('display')).then(
chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None)
shared.gradio['Send dummy message'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
chat.send_dummy_message, gradio('textbox', 'interface_state'), gradio('history')).then(
lambda: '', None, gradio('textbox'), show_progress=False).then(
- chat.redraw_html, shared.reload_inputs, gradio('display')).then(
+ chat.redraw_html, gradio(reload_arr), gradio('display')).then(
chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None)
shared.gradio['Send dummy reply'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
chat.send_dummy_reply, gradio('textbox', 'interface_state'), gradio('history')).then(
lambda: '', None, gradio('textbox'), show_progress=False).then(
- chat.redraw_html, shared.reload_inputs, gradio('display')).then(
+ chat.redraw_html, gradio(reload_arr), gradio('display')).then(
chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None)
- shared.gradio['Clear history'].click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, clear_arr)
- shared.gradio['Clear history-cancel'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr)
+ shared.gradio['Clear history'].click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, gradio(clear_arr))
+ shared.gradio['Clear history-cancel'].click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, gradio(clear_arr))
shared.gradio['Clear history-confirm'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, clear_arr).then(
+ lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, gradio(clear_arr)).then(
chat.clear_chat_log, gradio('interface_state'), gradio('history')).then(
- chat.redraw_html, shared.reload_inputs, gradio('display')).then(
+ chat.redraw_html, gradio(reload_arr), gradio('display')).then(
chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None)
shared.gradio['Remove last'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
chat.remove_last_message, gradio('history'), gradio('textbox', 'history'), show_progress=False).then(
- chat.redraw_html, shared.reload_inputs, gradio('display')).then(
+ chat.redraw_html, gradio(reload_arr), gradio('display')).then(
chat.save_persistent_history, gradio('history', 'character_menu', 'mode'), None)
shared.gradio['character_menu'].change(
partial(chat.load_character, instruct=False), gradio('character_menu', 'name1', 'name2'), gradio('name1', 'name2', 'character_picture', 'greeting', 'context', 'dummy')).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
chat.load_persistent_history, gradio('interface_state'), gradio('history')).then(
- chat.redraw_html, shared.reload_inputs, gradio('display'))
+ chat.redraw_html, gradio(reload_arr), gradio('display'))
shared.gradio['Stop'].click(
stop_everything_event, None, None, queue=False, cancels=gen_events if shared.args.no_stream else None).then(
- chat.redraw_html, shared.reload_inputs, gradio('display'))
+ chat.redraw_html, gradio(reload_arr), gradio('display'))
shared.gradio['mode'].change(
lambda x: gr.update(visible=x != 'instruct'), gradio('mode'), gradio('chat_style'), show_progress=False).then(
- chat.redraw_html, shared.reload_inputs, gradio('display'))
+ chat.redraw_html, gradio(reload_arr), gradio('display'))
- shared.gradio['chat_style'].change(chat.redraw_html, shared.reload_inputs, gradio('display'))
+ shared.gradio['chat_style'].change(chat.redraw_html, gradio(reload_arr), gradio('display'))
shared.gradio['instruction_template'].change(
partial(chat.load_character, instruct=True), gradio('instruction_template', 'name1_instruct', 'name2_instruct'), gradio('name1_instruct', 'name2_instruct', 'dummy', 'dummy', 'context_instruct', 'turn_template'))
shared.gradio['load_chat_history'].upload(
chat.load_history, gradio('load_chat_history', 'history'), gradio('history')).then(
- chat.redraw_html, shared.reload_inputs, gradio('display')).then(
+ chat.redraw_html, gradio(reload_arr), gradio('display')).then(
None, None, None, _js='() => {alert("The history has been loaded.")}')
shared.gradio['Copy last reply'].click(chat.send_last_reply_to_input, gradio('history'), gradio('textbox'), show_progress=False)
@@ -256,4 +257,4 @@ def create_event_handlers():
shared.gradio['upload_img_tavern'].clear(lambda: (None, None, None, gr.update(interactive=False)), None, gradio('tavern_name', 'tavern_desc', 'tavern_json', 'Submit tavern character'), show_progress=False)
shared.gradio['your_picture'].change(
chat.upload_your_profile_picture, gradio('your_picture'), None).then(
- partial(chat.redraw_html, reset_cache=True), shared.reload_inputs, gradio('display'))
+ partial(chat.redraw_html, reset_cache=True), gradio(reload_arr), gradio('display'))
diff --git a/modules/ui_default.py b/modules/ui_default.py
index b879e1ef..d26863bc 100644
--- a/modules/ui_default.py
+++ b/modules/ui_default.py
@@ -8,87 +8,85 @@ from modules.text_generation import (
)
from modules.utils import gradio
+inputs = ('textbox-default', 'interface_state')
+outputs = ('output_textbox', 'html-default')
+
def create_ui():
default_text = load_prompt(shared.settings['prompt'])
- shared.gradio['interface_state'] = gr.State({k: None for k in shared.input_elements})
- shared.gradio['last_input'] = gr.State('')
-
- with gr.Tab('Text generation', elem_id='main'):
+ with gr.Tab('Default', elem_id='default-tab'):
+ shared.gradio['last_input-default'] = gr.State('')
with gr.Row():
with gr.Column():
- shared.gradio['textbox'] = gr.Textbox(value=default_text, elem_classes=['textbox_default', 'add_scrollbar'], lines=27, label='Input')
- shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
+ shared.gradio['textbox-default'] = gr.Textbox(value=default_text, elem_classes=['textbox_default', 'add_scrollbar'], lines=27, label='Input')
with gr.Row():
- shared.gradio['Generate'] = gr.Button('Generate', variant='primary')
- shared.gradio['Stop'] = gr.Button('Stop', elem_id='stop')
- shared.gradio['Continue'] = gr.Button('Continue')
- shared.gradio['count_tokens'] = gr.Button('Count tokens')
+ shared.gradio['Generate-default'] = gr.Button('Generate', variant='primary')
+ shared.gradio['Stop-default'] = gr.Button('Stop', elem_id='stop')
+ shared.gradio['Continue-default'] = gr.Button('Continue')
+ shared.gradio['count_tokens-default'] = gr.Button('Count tokens')
with gr.Row():
- shared.gradio['prompt_menu'] = gr.Dropdown(choices=utils.get_available_prompts(), value='None', label='Prompt', elem_classes='slim-dropdown')
- ui.create_refresh_button(shared.gradio['prompt_menu'], lambda: None, lambda: {'choices': utils.get_available_prompts()}, 'refresh-button')
- shared.gradio['save_prompt'] = gr.Button('💾', elem_classes='refresh-button')
- shared.gradio['delete_prompt'] = gr.Button('🗑️', elem_classes='refresh-button')
+ shared.gradio['prompt_menu-default'] = gr.Dropdown(choices=utils.get_available_prompts(), value='None', label='Prompt', elem_classes='slim-dropdown')
+ ui.create_refresh_button(shared.gradio['prompt_menu-default'], lambda: None, lambda: {'choices': utils.get_available_prompts()}, 'refresh-button')
+ shared.gradio['save_prompt-default'] = gr.Button('💾', elem_classes='refresh-button')
+ shared.gradio['delete_prompt-default'] = gr.Button('🗑️', elem_classes='refresh-button')
- shared.gradio['status'] = gr.Markdown('')
+ shared.gradio['status-default'] = gr.Markdown('')
with gr.Column():
with gr.Tab('Raw'):
shared.gradio['output_textbox'] = gr.Textbox(lines=27, label='Output', elem_classes=['textbox_default_output', 'add_scrollbar'])
with gr.Tab('Markdown'):
- shared.gradio['markdown_render'] = gr.Button('Render')
- shared.gradio['markdown'] = gr.Markdown()
+ shared.gradio['markdown_render-default'] = gr.Button('Render')
+ shared.gradio['markdown-default'] = gr.Markdown()
with gr.Tab('HTML'):
- shared.gradio['html'] = gr.HTML()
+ shared.gradio['html-default'] = gr.HTML()
def create_event_handlers():
gen_events = []
- shared.input_params = gradio('textbox', 'interface_state')
- output_params = gradio('output_textbox', 'html')
- gen_events.append(shared.gradio['Generate'].click(
- lambda x: x, gradio('textbox'), gradio('last_input')).then(
+ gen_events.append(shared.gradio['Generate-default'].click(
+ lambda x: x, gradio('textbox-default'), gradio('last_input-default')).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- generate_reply_wrapper, shared.input_params, output_params, show_progress=False).then(
+ generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
# lambda: None, None, None, _js="() => {element = document.getElementsByTagName('textarea')[0]; element.scrollTop = element.scrollHeight}")
)
- gen_events.append(shared.gradio['textbox'].submit(
- lambda x: x, gradio('textbox'), gradio('last_input')).then(
+ gen_events.append(shared.gradio['textbox-default'].submit(
+ lambda x: x, gradio('textbox-default'), gradio('last_input-default')).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- generate_reply_wrapper, shared.input_params, output_params, show_progress=False).then(
+ generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
# lambda: None, None, None, _js="() => {element = document.getElementsByTagName('textarea')[0]; element.scrollTop = element.scrollHeight}")
)
- shared.gradio['markdown_render'].click(lambda x: x, gradio('output_textbox'), gradio('markdown'), queue=False)
- gen_events.append(shared.gradio['Continue'].click(
+ shared.gradio['markdown_render-default'].click(lambda x: x, gradio('output_textbox'), gradio('markdown-default'), queue=False)
+ gen_events.append(shared.gradio['Continue-default'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- generate_reply_wrapper, [shared.gradio['output_textbox']] + shared.input_params[1:], output_params, show_progress=False).then(
+ generate_reply_wrapper, [shared.gradio['output_textbox']] + gradio(inputs)[1:], gradio(outputs), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
# lambda: None, None, None, _js="() => {element = document.getElementsByTagName('textarea')[1]; element.scrollTop = element.scrollHeight}")
)
- shared.gradio['Stop'].click(stop_everything_event, None, None, queue=False, cancels=gen_events if shared.args.no_stream else None)
- shared.gradio['prompt_menu'].change(load_prompt, gradio('prompt_menu'), gradio('textbox'), show_progress=False)
- shared.gradio['save_prompt'].click(
- lambda x: x, gradio('textbox'), gradio('save_contents')).then(
+ shared.gradio['Stop-default'].click(stop_everything_event, None, None, queue=False, cancels=gen_events if shared.args.no_stream else None)
+ shared.gradio['prompt_menu-default'].change(load_prompt, gradio('prompt_menu-default'), gradio('textbox-default'), show_progress=False)
+ shared.gradio['save_prompt-default'].click(
+ lambda x: x, gradio('textbox-default'), gradio('save_contents')).then(
lambda: 'prompts/', None, gradio('save_root')).then(
lambda: utils.current_time() + '.txt', None, gradio('save_filename')).then(
lambda: gr.update(visible=True), None, gradio('file_saver'))
- shared.gradio['delete_prompt'].click(
+ shared.gradio['delete_prompt-default'].click(
lambda: 'prompts/', None, gradio('delete_root')).then(
- lambda x: x + '.txt', gradio('prompt_menu'), gradio('delete_filename')).then(
+ lambda x: x + '.txt', gradio('prompt_menu-default'), gradio('delete_filename')).then(
lambda: gr.update(visible=True), None, gradio('file_deleter'))
- shared.gradio['count_tokens'].click(count_tokens, gradio('textbox'), gradio('status'), show_progress=False)
+ shared.gradio['count_tokens-default'].click(count_tokens, gradio('textbox-default'), gradio('status-default'), show_progress=False)
diff --git a/modules/ui_file_saving.py b/modules/ui_file_saving.py
index 952d66c9..98165d67 100644
--- a/modules/ui_file_saving.py
+++ b/modules/ui_file_saving.py
@@ -2,7 +2,7 @@ import json
import gradio as gr
-from modules import chat, presets, shared, ui, utils
+from modules import chat, presets, shared, ui, ui_chat, utils
from modules.utils import gradio
@@ -26,18 +26,17 @@ def create_ui():
shared.gradio['delete_cancel'] = gr.Button('Cancel', elem_classes="small-button")
# Character saver/deleter
- if shared.is_chat():
- with gr.Box(visible=False, elem_classes='file-saver') as shared.gradio['character_saver']:
- shared.gradio['save_character_filename'] = gr.Textbox(lines=1, label='File name', info='The character will be saved to your characters/ folder with this base filename.')
- with gr.Row():
- shared.gradio['save_character_confirm'] = gr.Button('Save', elem_classes="small-button")
- shared.gradio['save_character_cancel'] = gr.Button('Cancel', elem_classes="small-button")
+ with gr.Box(visible=False, elem_classes='file-saver') as shared.gradio['character_saver']:
+ shared.gradio['save_character_filename'] = gr.Textbox(lines=1, label='File name', info='The character will be saved to your characters/ folder with this base filename.')
+ with gr.Row():
+ shared.gradio['save_character_confirm'] = gr.Button('Save', elem_classes="small-button")
+ shared.gradio['save_character_cancel'] = gr.Button('Cancel', elem_classes="small-button")
- with gr.Box(visible=False, elem_classes='file-saver') as shared.gradio['character_deleter']:
- gr.Markdown('Confirm the character deletion?')
- with gr.Row():
- shared.gradio['delete_character_confirm'] = gr.Button('Delete', elem_classes="small-button", variant='stop')
- shared.gradio['delete_character_cancel'] = gr.Button('Cancel', elem_classes="small-button")
+ with gr.Box(visible=False, elem_classes='file-saver') as shared.gradio['character_deleter']:
+ gr.Markdown('Confirm the character deletion?')
+ with gr.Row():
+ shared.gradio['delete_character_confirm'] = gr.Button('Delete', elem_classes="small-button", variant='stop')
+ shared.gradio['delete_character_cancel'] = gr.Button('Cancel', elem_classes="small-button")
def create_event_handlers():
@@ -51,18 +50,18 @@ def create_event_handlers():
shared.gradio['delete_cancel'].click(lambda: gr.update(visible=False), None, gradio('file_deleter'))
shared.gradio['save_cancel'].click(lambda: gr.update(visible=False), None, gradio('file_saver'))
- if shared.is_chat():
- shared.gradio['save_character_confirm'].click(
- chat.save_character, gradio('name2', 'greeting', 'context', 'character_picture', 'save_character_filename'), None).then(
- lambda: gr.update(visible=False), None, gradio('character_saver'))
- shared.gradio['delete_character_confirm'].click(
- chat.delete_character, gradio('character_menu'), None).then(
- lambda: gr.update(visible=False), None, gradio('character_deleter')).then(
- lambda: gr.update(choices=utils.get_available_characters()), None, gradio('character_menu'))
+ shared.gradio['save_character_confirm'].click(
+ chat.save_character, gradio('name2', 'greeting', 'context', 'character_picture', 'save_character_filename'), None).then(
+ lambda: gr.update(visible=False), None, gradio('character_saver'))
- shared.gradio['save_character_cancel'].click(lambda: gr.update(visible=False), None, gradio('character_saver'))
- shared.gradio['delete_character_cancel'].click(lambda: gr.update(visible=False), None, gradio('character_deleter'))
+ shared.gradio['delete_character_confirm'].click(
+ chat.delete_character, gradio('character_menu'), None).then(
+ lambda: gr.update(visible=False), None, gradio('character_deleter')).then(
+ lambda: gr.update(choices=utils.get_available_characters()), None, gradio('character_menu'))
+
+ shared.gradio['save_character_cancel'].click(lambda: gr.update(visible=False), None, gradio('character_saver'))
+ shared.gradio['delete_character_cancel'].click(lambda: gr.update(visible=False), None, gradio('character_deleter'))
shared.gradio['save_preset'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
@@ -80,28 +79,21 @@ def create_event_handlers():
shared.gradio['save_session'].click(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda x: json.dumps(x, indent=4), gradio('interface_state'), gradio('temporary_text')).then(
- None, gradio('temporary_text'), None, _js=f"(contents) => {{{ui.save_files_js}; saveSession(contents, \"{shared.get_mode()}\")}}")
+ None, gradio('temporary_text'), None, _js=f"(contents) => {{{ui.save_files_js}; saveSession(contents)}}")
- if shared.is_chat():
- shared.gradio['load_session'].upload(
- ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- load_session, gradio('load_session', 'interface_state'), gradio('interface_state')).then(
- ui.apply_interface_values, gradio('interface_state'), gradio(ui.list_interface_input_elements()), show_progress=False).then(
- chat.redraw_html, shared.reload_inputs, gradio('display')).then(
- None, None, None, _js='() => {alert("The session has been loaded.")}')
- else:
- shared.gradio['load_session'].upload(
- ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- load_session, gradio('load_session', 'interface_state'), gradio('interface_state')).then(
- ui.apply_interface_values, gradio('interface_state'), gradio(ui.list_interface_input_elements()), show_progress=False).then(
- None, None, None, _js='() => {alert("The session has been loaded.")}')
+ shared.gradio['load_session'].upload(
+ ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+ load_session, gradio('load_session', 'interface_state'), gradio('interface_state')).then(
+ ui.apply_interface_values, gradio('interface_state'), gradio(ui.list_interface_input_elements()), show_progress=False).then(
+ chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display')).then(
+ None, None, None, _js='() => {alert("The session has been loaded.")}')
def load_session(file, state):
decoded_file = file if type(file) == str else file.decode('utf-8')
data = json.loads(decoded_file)
- if shared.is_chat() and 'character_menu' in data and state.get('character_menu') != data.get('character_menu'):
+ if 'character_menu' in data and state.get('character_menu') != data.get('character_menu'):
shared.session_is_loading = True
state.update(data)
diff --git a/modules/ui_notebook.py b/modules/ui_notebook.py
index 9e8b3af6..7d6648d2 100644
--- a/modules/ui_notebook.py
+++ b/modules/ui_notebook.py
@@ -8,90 +8,85 @@ from modules.text_generation import (
)
from modules.utils import gradio
+inputs = ('textbox-notebook', 'interface_state')
+outputs = ('textbox-notebook', 'html-notebook')
+
def create_ui():
default_text = load_prompt(shared.settings['prompt'])
- shared.gradio['interface_state'] = gr.State({k: None for k in shared.input_elements})
- shared.gradio['last_input'] = gr.State('')
- with gr.Tab('Text generation', elem_id='main'):
+ with gr.Tab('Notebook', elem_id='notebook-tab'):
+ shared.gradio['last_input-notebook'] = gr.State('')
with gr.Row():
with gr.Column(scale=4):
with gr.Tab('Raw'):
- shared.gradio['textbox'] = gr.Textbox(value=default_text, elem_classes=['textbox', 'add_scrollbar'], lines=27)
+ shared.gradio['textbox-notebook'] = gr.Textbox(value=default_text, elem_classes=['textbox', 'add_scrollbar'], lines=27)
with gr.Tab('Markdown'):
- shared.gradio['markdown_render'] = gr.Button('Render')
- shared.gradio['markdown'] = gr.Markdown()
+ shared.gradio['markdown_render-notebook'] = gr.Button('Render')
+ shared.gradio['markdown-notebook'] = gr.Markdown()
with gr.Tab('HTML'):
- shared.gradio['html'] = gr.HTML()
+ shared.gradio['html-notebook'] = gr.HTML()
with gr.Row():
- shared.gradio['Generate'] = gr.Button('Generate', variant='primary', elem_classes='small-button')
- shared.gradio['Stop'] = gr.Button('Stop', elem_classes='small-button', elem_id='stop')
+ shared.gradio['Generate-notebook'] = gr.Button('Generate', variant='primary', elem_classes='small-button')
+ shared.gradio['Stop-notebook'] = gr.Button('Stop', elem_classes='small-button', elem_id='stop')
shared.gradio['Undo'] = gr.Button('Undo', elem_classes='small-button')
- shared.gradio['Regenerate'] = gr.Button('Regenerate', elem_classes='small-button')
+ shared.gradio['Regenerate-notebook'] = gr.Button('Regenerate', elem_classes='small-button')
with gr.Column(scale=1):
gr.HTML('')
- shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
with gr.Row():
- shared.gradio['prompt_menu'] = gr.Dropdown(choices=utils.get_available_prompts(), value='None', label='Prompt', elem_classes='slim-dropdown')
- ui.create_refresh_button(shared.gradio['prompt_menu'], lambda: None, lambda: {'choices': utils.get_available_prompts()}, ['refresh-button', 'refresh-button-small'])
- shared.gradio['save_prompt'] = gr.Button('💾', elem_classes=['refresh-button', 'refresh-button-small'])
- shared.gradio['delete_prompt'] = gr.Button('🗑️', elem_classes=['refresh-button', 'refresh-button-small'])
+ shared.gradio['prompt_menu-notebook'] = gr.Dropdown(choices=utils.get_available_prompts(), value='None', label='Prompt', elem_classes='slim-dropdown')
+ ui.create_refresh_button(shared.gradio['prompt_menu-notebook'], lambda: None, lambda: {'choices': utils.get_available_prompts()}, ['refresh-button', 'refresh-button-small'])
+ shared.gradio['save_prompt-notebook'] = gr.Button('💾', elem_classes=['refresh-button', 'refresh-button-small'])
+ shared.gradio['delete_prompt-notebook'] = gr.Button('🗑️', elem_classes=['refresh-button', 'refresh-button-small'])
- shared.gradio['count_tokens'] = gr.Button('Count tokens')
- shared.gradio['status'] = gr.Markdown('')
+ shared.gradio['count_tokens-notebook'] = gr.Button('Count tokens')
+ shared.gradio['status-notebook'] = gr.Markdown('')
def create_event_handlers():
gen_events = []
- shared.input_params = gradio('textbox', 'interface_state')
- output_params = gradio('textbox', 'html')
-
- gen_events.append(shared.gradio['Generate'].click(
- lambda x: x, gradio('textbox'), gradio('last_input')).then(
+ gen_events.append(shared.gradio['Generate-notebook'].click(
+ lambda x: x, gradio('textbox-notebook'), gradio('last_input-notebook')).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- generate_reply_wrapper, shared.input_params, output_params, show_progress=False).then(
+ generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
- # lambda: None, None, None, _js="() => {element = document.getElementsByTagName('textarea')[0]; element.scrollTop = element.scrollHeight}")
)
- gen_events.append(shared.gradio['textbox'].submit(
- lambda x: x, gradio('textbox'), gradio('last_input')).then(
+ gen_events.append(shared.gradio['textbox-notebook'].submit(
+ lambda x: x, gradio('textbox-notebook'), gradio('last_input-notebook')).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- generate_reply_wrapper, shared.input_params, output_params, show_progress=False).then(
+ generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
- # lambda: None, None, None, _js="() => {element = document.getElementsByTagName('textarea')[0]; element.scrollTop = element.scrollHeight}")
)
- shared.gradio['Undo'].click(lambda x: x, gradio('last_input'), gradio('textbox'), show_progress=False)
- shared.gradio['markdown_render'].click(lambda x: x, gradio('textbox'), gradio('markdown'), queue=False)
- gen_events.append(shared.gradio['Regenerate'].click(
- lambda x: x, gradio('last_input'), gradio('textbox'), show_progress=False).then(
+ shared.gradio['Undo'].click(lambda x: x, gradio('last_input-notebook'), gradio('textbox-notebook'), show_progress=False)
+ shared.gradio['markdown_render-notebook'].click(lambda x: x, gradio('textbox-notebook'), gradio('markdown-notebook'), queue=False)
+ gen_events.append(shared.gradio['Regenerate-notebook'].click(
+ lambda x: x, gradio('last_input-notebook'), gradio('textbox-notebook'), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
- generate_reply_wrapper, shared.input_params, output_params, show_progress=False).then(
+ generate_reply_wrapper, gradio(inputs), gradio(outputs), show_progress=False).then(
ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
lambda: None, None, None, _js=f'() => {{{ui.audio_notification_js}}}')
- # lambda: None, None, None, _js="() => {element = document.getElementsByTagName('textarea')[0]; element.scrollTop = element.scrollHeight}")
)
- shared.gradio['Stop'].click(stop_everything_event, None, None, queue=False, cancels=gen_events if shared.args.no_stream else None)
- shared.gradio['prompt_menu'].change(load_prompt, gradio('prompt_menu'), gradio('textbox'), show_progress=False)
- shared.gradio['save_prompt'].click(
- lambda x: x, gradio('textbox'), gradio('save_contents')).then(
+ shared.gradio['Stop-notebook'].click(stop_everything_event, None, None, queue=False, cancels=gen_events if shared.args.no_stream else None)
+ shared.gradio['prompt_menu-notebook'].change(load_prompt, gradio('prompt_menu-notebook'), gradio('textbox-notebook'), show_progress=False)
+ shared.gradio['save_prompt-notebook'].click(
+ lambda x: x, gradio('textbox-notebook'), gradio('save_contents')).then(
lambda: 'prompts/', None, gradio('save_root')).then(
lambda: utils.current_time() + '.txt', None, gradio('save_filename')).then(
lambda: gr.update(visible=True), None, gradio('file_saver'))
- shared.gradio['delete_prompt'].click(
+ shared.gradio['delete_prompt-notebook'].click(
lambda: 'prompts/', None, gradio('delete_root')).then(
- lambda x: x + '.txt', gradio('prompt_menu'), gradio('delete_filename')).then(
+ lambda x: x + '.txt', gradio('prompt_menu-notebook'), gradio('delete_filename')).then(
lambda: gr.update(visible=True), None, gradio('file_deleter'))
- shared.gradio['count_tokens'].click(count_tokens, gradio('textbox'), gradio('status'), show_progress=False)
+ shared.gradio['count_tokens-notebook'].click(count_tokens, gradio('textbox-notebook'), gradio('status-notebook'), show_progress=False)
diff --git a/modules/ui_parameters.py b/modules/ui_parameters.py
index 4b9fb918..2f0c2efd 100644
--- a/modules/ui_parameters.py
+++ b/modules/ui_parameters.py
@@ -1,143 +1,131 @@
import gradio as gr
-from modules import loaders, presets, shared, ui, utils
+from modules import loaders, presets, shared, ui, ui_chat, utils
from modules.utils import gradio
def create_ui(default_preset):
generate_params = presets.load_preset(default_preset)
with gr.Tab("Parameters", elem_id="parameters"):
- with gr.Row():
- with gr.Column():
- with gr.Row():
- shared.gradio['preset_menu'] = gr.Dropdown(choices=utils.get_available_presets(), value=default_preset, label='Generation parameters preset', elem_classes='slim-dropdown')
- ui.create_refresh_button(shared.gradio['preset_menu'], lambda: None, lambda: {'choices': utils.get_available_presets()}, 'refresh-button')
- shared.gradio['save_preset'] = gr.Button('💾', elem_classes='refresh-button')
- shared.gradio['delete_preset'] = gr.Button('🗑️', elem_classes='refresh-button')
-
- with gr.Column():
- shared.gradio['filter_by_loader'] = gr.Dropdown(label="Filter by loader", choices=["All"] + list(loaders.loaders_and_params.keys()), value="All", elem_classes='slim-dropdown')
-
- with gr.Row():
- with gr.Column():
- with gr.Box():
+ with gr.Tab("Generation"):
+ with gr.Row():
+ with gr.Column():
with gr.Row():
- with gr.Column():
- shared.gradio['temperature'] = gr.Slider(0.01, 1.99, value=generate_params['temperature'], step=0.01, label='temperature')
- shared.gradio['top_p'] = gr.Slider(0.0, 1.0, value=generate_params['top_p'], step=0.01, label='top_p')
- shared.gradio['top_k'] = gr.Slider(0, 200, value=generate_params['top_k'], step=1, label='top_k')
- shared.gradio['typical_p'] = gr.Slider(0.0, 1.0, value=generate_params['typical_p'], step=0.01, label='typical_p')
- shared.gradio['epsilon_cutoff'] = gr.Slider(0, 9, value=generate_params['epsilon_cutoff'], step=0.01, label='epsilon_cutoff')
- shared.gradio['eta_cutoff'] = gr.Slider(0, 20, value=generate_params['eta_cutoff'], step=0.01, label='eta_cutoff')
- shared.gradio['tfs'] = gr.Slider(0.0, 1.0, value=generate_params['tfs'], step=0.01, label='tfs')
- shared.gradio['top_a'] = gr.Slider(0.0, 1.0, value=generate_params['top_a'], step=0.01, label='top_a')
+ shared.gradio['preset_menu'] = gr.Dropdown(choices=utils.get_available_presets(), value=default_preset, label='Preset', elem_classes='slim-dropdown')
+ ui.create_refresh_button(shared.gradio['preset_menu'], lambda: None, lambda: {'choices': utils.get_available_presets()}, 'refresh-button')
+ shared.gradio['save_preset'] = gr.Button('💾', elem_classes='refresh-button')
+ shared.gradio['delete_preset'] = gr.Button('🗑️', elem_classes='refresh-button')
- with gr.Column():
- shared.gradio['repetition_penalty'] = gr.Slider(1.0, 1.5, value=generate_params['repetition_penalty'], step=0.01, label='repetition_penalty')
- shared.gradio['repetition_penalty_range'] = gr.Slider(0, 4096, step=64, value=generate_params['repetition_penalty_range'], label='repetition_penalty_range')
- shared.gradio['encoder_repetition_penalty'] = gr.Slider(0.8, 1.5, value=generate_params['encoder_repetition_penalty'], step=0.01, label='encoder_repetition_penalty')
- shared.gradio['no_repeat_ngram_size'] = gr.Slider(0, 20, step=1, value=generate_params['no_repeat_ngram_size'], label='no_repeat_ngram_size')
- shared.gradio['min_length'] = gr.Slider(0, 2000, step=1, value=generate_params['min_length'], label='min_length')
- shared.gradio['seed'] = gr.Number(value=shared.settings['seed'], label='Seed (-1 for random)')
- shared.gradio['do_sample'] = gr.Checkbox(value=generate_params['do_sample'], label='do_sample')
+ with gr.Column():
+ shared.gradio['filter_by_loader'] = gr.Dropdown(label="Filter by loader", choices=["All"] + list(loaders.loaders_and_params.keys()), value="All", elem_classes='slim-dropdown')
- with gr.Accordion("Learn more", open=False):
- gr.Markdown("""
+ with gr.Row():
+ with gr.Column():
+ with gr.Box():
+ with gr.Row():
+ with gr.Column():
+ shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
+ shared.gradio['temperature'] = gr.Slider(0.01, 1.99, value=generate_params['temperature'], step=0.01, label='temperature')
+ shared.gradio['top_p'] = gr.Slider(0.0, 1.0, value=generate_params['top_p'], step=0.01, label='top_p')
+ shared.gradio['top_k'] = gr.Slider(0, 200, value=generate_params['top_k'], step=1, label='top_k')
+ shared.gradio['typical_p'] = gr.Slider(0.0, 1.0, value=generate_params['typical_p'], step=0.01, label='typical_p')
+ shared.gradio['epsilon_cutoff'] = gr.Slider(0, 9, value=generate_params['epsilon_cutoff'], step=0.01, label='epsilon_cutoff')
+ shared.gradio['eta_cutoff'] = gr.Slider(0, 20, value=generate_params['eta_cutoff'], step=0.01, label='eta_cutoff')
+ shared.gradio['tfs'] = gr.Slider(0.0, 1.0, value=generate_params['tfs'], step=0.01, label='tfs')
+ shared.gradio['top_a'] = gr.Slider(0.0, 1.0, value=generate_params['top_a'], step=0.01, label='top_a')
- For a technical description of the parameters, the [transformers documentation](https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig) is a good reference.
+ with gr.Column():
+ shared.gradio['repetition_penalty'] = gr.Slider(1.0, 1.5, value=generate_params['repetition_penalty'], step=0.01, label='repetition_penalty')
+ shared.gradio['repetition_penalty_range'] = gr.Slider(0, 4096, step=64, value=generate_params['repetition_penalty_range'], label='repetition_penalty_range')
+ shared.gradio['encoder_repetition_penalty'] = gr.Slider(0.8, 1.5, value=generate_params['encoder_repetition_penalty'], step=0.01, label='encoder_repetition_penalty')
+ shared.gradio['no_repeat_ngram_size'] = gr.Slider(0, 20, step=1, value=generate_params['no_repeat_ngram_size'], label='no_repeat_ngram_size')
+ shared.gradio['min_length'] = gr.Slider(0, 2000, step=1, value=generate_params['min_length'], label='min_length')
+ shared.gradio['seed'] = gr.Number(value=shared.settings['seed'], label='Seed (-1 for random)')
+ shared.gradio['do_sample'] = gr.Checkbox(value=generate_params['do_sample'], label='do_sample')
- The best presets, according to the [Preset Arena](https://github.com/oobabooga/oobabooga.github.io/blob/main/arena/results.md) experiment, are:
+ with gr.Accordion("Learn more", open=False):
+ gr.Markdown("""
- * Instruction following:
- 1) Divine Intellect
- 2) Big O
- 3) simple-1
- 4) Space Alien
- 5) StarChat
- 6) Titanic
- 7) tfs-with-top-a
- 8) Asterism
- 9) Contrastive Search
+ For a technical description of the parameters, the [transformers documentation](https://huggingface.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig) is a good reference.
- * Chat:
- 1) Midnight Enigma
- 2) Yara
- 3) Shortwave
+ The best presets, according to the [Preset Arena](https://github.com/oobabooga/oobabooga.github.io/blob/main/arena/results.md) experiment, are:
- ### Temperature
- Primary factor to control randomness of outputs. 0 = deterministic (only the most likely token is used). Higher value = more randomness.
- ### top_p
- If not set to 1, select tokens with probabilities adding up to less than this number. Higher value = higher range of possible random results.
- ### top_k
- Similar to top_p, but select instead only the top_k most likely tokens. Higher value = higher range of possible random results.
- ### typical_p
- If not set to 1, select only tokens that are at least this much more likely to appear than random tokens, given the prior text.
- ### epsilon_cutoff
- In units of 1e-4; a reasonable value is 3. This sets a probability floor below which tokens are excluded from being sampled. Should be used with top_p, top_k, and eta_cutoff set to 0.
- ### eta_cutoff
- In units of 1e-4; a reasonable value is 3. Should be used with top_p, top_k, and epsilon_cutoff set to 0.
- ### repetition_penalty
- Exponential penalty factor for repeating prior tokens. 1 means no penalty, higher value = less repetition, lower value = more repetition.
- ### repetition_penalty_range
- The number of most recent tokens to consider for repetition penalty. 0 makes all tokens be used.
- ### encoder_repetition_penalty
- Also known as the "Hallucinations filter". Used to penalize tokens that are *not* in the prior text. Higher value = more likely to stay in context, lower value = more likely to diverge.
- ### no_repeat_ngram_size
- If not set to 0, specifies the length of token sets that are completely blocked from repeating at all. Higher values = blocks larger phrases, lower values = blocks words or letters from repeating. Only 0 or high values are a good idea in most cases.
- ### min_length
- Minimum generation length in tokens.
- ### penalty_alpha
- Contrastive Search is enabled by setting this to greater than zero and unchecking "do_sample". It should be used with a low value of top_k, for instance, top_k = 4.
+ * Instruction following:
+ 1) Divine Intellect
+ 2) Big O
+ 3) simple-1
+ 4) Space Alien
+ 5) StarChat
+ 6) Titanic
+ 7) tfs-with-top-a
+ 8) Asterism
+ 9) Contrastive Search
- """, elem_classes="markdown")
+ * Chat:
+ 1) Midnight Enigma
+ 2) Yara
+ 3) Shortwave
- with gr.Column():
- create_chat_settings_menus()
- with gr.Box():
- with gr.Row():
- with gr.Column():
- shared.gradio['guidance_scale'] = gr.Slider(-0.5, 2.5, step=0.05, value=generate_params['guidance_scale'], label='guidance_scale', info='For CFG. 1.5 is a good value.')
- shared.gradio['negative_prompt'] = gr.Textbox(value=shared.settings['negative_prompt'], label='Negative prompt')
- shared.gradio['mirostat_mode'] = gr.Slider(0, 2, step=1, value=generate_params['mirostat_mode'], label='mirostat_mode', info='mode=1 is for llama.cpp only.')
- shared.gradio['mirostat_tau'] = gr.Slider(0, 10, step=0.01, value=generate_params['mirostat_tau'], label='mirostat_tau')
- shared.gradio['mirostat_eta'] = gr.Slider(0, 1, step=0.01, value=generate_params['mirostat_eta'], label='mirostat_eta')
+ ### Temperature
+ Primary factor to control randomness of outputs. 0 = deterministic (only the most likely token is used). Higher value = more randomness.
+ ### top_p
+ If not set to 1, select tokens with probabilities adding up to less than this number. Higher value = higher range of possible random results.
+ ### top_k
+ Similar to top_p, but select instead only the top_k most likely tokens. Higher value = higher range of possible random results.
+ ### typical_p
+ If not set to 1, select only tokens that are at least this much more likely to appear than random tokens, given the prior text.
+ ### epsilon_cutoff
+ In units of 1e-4; a reasonable value is 3. This sets a probability floor below which tokens are excluded from being sampled. Should be used with top_p, top_k, and eta_cutoff set to 0.
+ ### eta_cutoff
+ In units of 1e-4; a reasonable value is 3. Should be used with top_p, top_k, and epsilon_cutoff set to 0.
+ ### repetition_penalty
+ Exponential penalty factor for repeating prior tokens. 1 means no penalty, higher value = less repetition, lower value = more repetition.
+ ### repetition_penalty_range
+ The number of most recent tokens to consider for repetition penalty. 0 makes all tokens be used.
+ ### encoder_repetition_penalty
+ Also known as the "Hallucinations filter". Used to penalize tokens that are *not* in the prior text. Higher value = more likely to stay in context, lower value = more likely to diverge.
+ ### no_repeat_ngram_size
+ If not set to 0, specifies the length of token sets that are completely blocked from repeating at all. Higher values = blocks larger phrases, lower values = blocks words or letters from repeating. Only 0 or high values are a good idea in most cases.
+ ### min_length
+ Minimum generation length in tokens.
+ ### penalty_alpha
+ Contrastive Search is enabled by setting this to greater than zero and unchecking "do_sample". It should be used with a low value of top_k, for instance, top_k = 4.
- with gr.Column():
- shared.gradio['penalty_alpha'] = gr.Slider(0, 5, value=generate_params['penalty_alpha'], label='penalty_alpha', info='For Contrastive Search. do_sample must be unchecked.')
+ """, elem_classes="markdown")
- shared.gradio['num_beams'] = gr.Slider(1, 20, step=1, value=generate_params['num_beams'], label='num_beams', info='For Beam Search, along with length_penalty and early_stopping.')
- shared.gradio['length_penalty'] = gr.Slider(-5, 5, value=generate_params['length_penalty'], label='length_penalty')
- shared.gradio['early_stopping'] = gr.Checkbox(value=generate_params['early_stopping'], label='early_stopping')
+ with gr.Column():
+ with gr.Box():
+ with gr.Row():
+ with gr.Column():
+ shared.gradio['guidance_scale'] = gr.Slider(-0.5, 2.5, step=0.05, value=generate_params['guidance_scale'], label='guidance_scale', info='For CFG. 1.5 is a good value.')
+ shared.gradio['negative_prompt'] = gr.Textbox(value=shared.settings['negative_prompt'], label='Negative prompt')
+ shared.gradio['mirostat_mode'] = gr.Slider(0, 2, step=1, value=generate_params['mirostat_mode'], label='mirostat_mode', info='mode=1 is for llama.cpp only.')
+ shared.gradio['mirostat_tau'] = gr.Slider(0, 10, step=0.01, value=generate_params['mirostat_tau'], label='mirostat_tau')
+ shared.gradio['mirostat_eta'] = gr.Slider(0, 1, step=0.01, value=generate_params['mirostat_eta'], label='mirostat_eta')
- with gr.Box():
- with gr.Row():
- with gr.Column():
- shared.gradio['truncation_length'] = gr.Slider(value=shared.settings['truncation_length'], minimum=shared.settings['truncation_length_min'], maximum=shared.settings['truncation_length_max'], step=256, label='Truncate the prompt up to this length', info='The leftmost tokens are removed if the prompt exceeds this length. Most models require this to be at most 2048.')
- shared.gradio['custom_stopping_strings'] = gr.Textbox(lines=1, value=shared.settings["custom_stopping_strings"] or None, label='Custom stopping strings', info='In addition to the defaults. Written between "" and separated by commas. For instance: "\\nYour Assistant:", "\\nThe assistant:"')
- with gr.Column():
- shared.gradio['auto_max_new_tokens'] = gr.Checkbox(value=shared.settings['auto_max_new_tokens'], label='auto_max_new_tokens', info='Expand max_new_tokens to the available context length.')
- shared.gradio['ban_eos_token'] = gr.Checkbox(value=shared.settings['ban_eos_token'], label='Ban the eos_token', info='Forces the model to never end the generation prematurely.')
- shared.gradio['add_bos_token'] = gr.Checkbox(value=shared.settings['add_bos_token'], label='Add the bos_token to the beginning of prompts', info='Disabling this can make the replies more creative.')
+ with gr.Column():
+ shared.gradio['penalty_alpha'] = gr.Slider(0, 5, value=generate_params['penalty_alpha'], label='penalty_alpha', info='For Contrastive Search. do_sample must be unchecked.')
- shared.gradio['skip_special_tokens'] = gr.Checkbox(value=shared.settings['skip_special_tokens'], label='Skip special tokens', info='Some specific models need this unset.')
- shared.gradio['stream'] = gr.Checkbox(value=not shared.args.no_stream, label='Activate text streaming')
+ shared.gradio['num_beams'] = gr.Slider(1, 20, step=1, value=generate_params['num_beams'], label='num_beams', info='For Beam Search, along with length_penalty and early_stopping.')
+ shared.gradio['length_penalty'] = gr.Slider(-5, 5, value=generate_params['length_penalty'], label='length_penalty')
+ shared.gradio['early_stopping'] = gr.Checkbox(value=generate_params['early_stopping'], label='early_stopping')
+
+ with gr.Box():
+ with gr.Row():
+ with gr.Column():
+ shared.gradio['truncation_length'] = gr.Slider(value=shared.settings['truncation_length'], minimum=shared.settings['truncation_length_min'], maximum=shared.settings['truncation_length_max'], step=256, label='Truncate the prompt up to this length', info='The leftmost tokens are removed if the prompt exceeds this length. Most models require this to be at most 2048.')
+ shared.gradio['custom_stopping_strings'] = gr.Textbox(lines=1, value=shared.settings["custom_stopping_strings"] or None, label='Custom stopping strings', info='In addition to the defaults. Written between "" and separated by commas. For instance: "\\nYour Assistant:", "\\nThe assistant:"')
+ with gr.Column():
+ shared.gradio['auto_max_new_tokens'] = gr.Checkbox(value=shared.settings['auto_max_new_tokens'], label='auto_max_new_tokens', info='Expand max_new_tokens to the available context length.')
+ shared.gradio['ban_eos_token'] = gr.Checkbox(value=shared.settings['ban_eos_token'], label='Ban the eos_token', info='Forces the model to never end the generation prematurely.')
+ shared.gradio['add_bos_token'] = gr.Checkbox(value=shared.settings['add_bos_token'], label='Add the bos_token to the beginning of prompts', info='Disabling this can make the replies more creative.')
+
+ shared.gradio['skip_special_tokens'] = gr.Checkbox(value=shared.settings['skip_special_tokens'], label='Skip special tokens', info='Some specific models need this unset.')
+ shared.gradio['stream'] = gr.Checkbox(value=not shared.args.no_stream, label='Activate text streaming')
+
+ ui_chat.create_chat_settings_ui()
def create_event_handlers():
shared.gradio['filter_by_loader'].change(loaders.blacklist_samplers, gradio('filter_by_loader'), gradio(loaders.list_all_samplers()), show_progress=False)
shared.gradio['preset_menu'].change(presets.load_preset_for_ui, gradio('preset_menu', 'interface_state'), gradio('interface_state') + gradio(presets.presets_params()))
-
-
-def create_chat_settings_menus():
- if not shared.is_chat():
- return
-
- with gr.Box():
- gr.Markdown("Chat parameters")
- with gr.Row():
- with gr.Column():
- shared.gradio['max_new_tokens'] = gr.Slider(minimum=shared.settings['max_new_tokens_min'], maximum=shared.settings['max_new_tokens_max'], step=1, label='max_new_tokens', value=shared.settings['max_new_tokens'])
- shared.gradio['chat_generation_attempts'] = gr.Slider(minimum=shared.settings['chat_generation_attempts_min'], maximum=shared.settings['chat_generation_attempts_max'], value=shared.settings['chat_generation_attempts'], step=1, label='Generation attempts (for longer replies)', info='New generations will be called until either this number is reached or no new content is generated between two iterations.')
-
- with gr.Column():
- shared.gradio['stop_at_newline'] = gr.Checkbox(value=shared.settings['stop_at_newline'], label='Stop generating at new line character')
diff --git a/modules/ui_session.py b/modules/ui_session.py
index 7a1a32b0..3d0fdac6 100644
--- a/modules/ui_session.py
+++ b/modules/ui_session.py
@@ -7,35 +7,21 @@ from modules.utils import gradio
def create_ui():
with gr.Tab("Session", elem_id="session-tab"):
- modes = ["default", "notebook", "chat"]
- current_mode = "default"
- for mode in modes[1:]:
- if getattr(shared.args, mode):
- current_mode = mode
- break
-
- cmd_list = vars(shared.args)
- bool_list = sorted([k for k in cmd_list if type(cmd_list[k]) is bool and k not in modes + ui.list_model_elements()])
- bool_active = [k for k in bool_list if vars(shared.args)[k]]
-
with gr.Row():
-
with gr.Column():
- with gr.Row():
- shared.gradio['interface_modes_menu'] = gr.Dropdown(choices=modes, value=current_mode, label="Mode", elem_classes='slim-dropdown')
- shared.gradio['reset_interface'] = gr.Button("Apply and restart", elem_classes="small-button", variant="primary")
- shared.gradio['toggle_dark_mode'] = gr.Button('Toggle 💡', elem_classes="small-button")
+ shared.gradio['reset_interface'] = gr.Button("Apply and restart")
+ shared.gradio['toggle_dark_mode'] = gr.Button('Toggle 💡')
with gr.Row():
with gr.Column():
shared.gradio['extensions_menu'] = gr.CheckboxGroup(choices=utils.get_available_extensions(), value=shared.args.extensions, label="Available extensions", info='Note that some of these extensions may require manually installing Python requirements through the command: pip install -r extensions/extension_name/requirements.txt', elem_classes='checkboxgroup-table')
with gr.Column():
- shared.gradio['bool_menu'] = gr.CheckboxGroup(choices=bool_list, value=bool_active, label="Boolean command-line flags", elem_classes='checkboxgroup-table')
+ shared.gradio['bool_menu'] = gr.CheckboxGroup(choices=get_boolean_arguments(), value=get_boolean_arguments(active=True), label="Boolean command-line flags", elem_classes='checkboxgroup-table')
with gr.Column():
if not shared.args.multi_user:
- shared.gradio['save_session'] = gr.Button('Save session', elem_id="save_session")
+ shared.gradio['save_session'] = gr.Button('Save session')
shared.gradio['load_session'] = gr.File(type='binary', file_types=['.json'], label="Upload Session JSON")
extension_name = gr.Textbox(lines=1, label='Install or update an extension', info='Enter the GitHub URL below and press Enter. For a list of extensions, see: https://github.com/oobabooga/text-generation-webui-extensions ⚠️ WARNING ⚠️ : extensions can execute arbitrary code. Make sure to inspect their source code before activating them.')
@@ -47,25 +33,33 @@ def create_ui():
# Reset interface event
shared.gradio['reset_interface'].click(
- set_interface_arguments, gradio('interface_modes_menu', 'extensions_menu', 'bool_menu'), None).then(
+ set_interface_arguments, gradio('extensions_menu', 'bool_menu'), None).then(
        lambda: None, None, None, _js='() => {document.body.innerHTML=\'<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>\'; setTimeout(function(){location.reload()},2500); return []}')
shared.gradio['toggle_dark_mode'].click(lambda: None, None, None, _js='() => {document.getElementsByTagName("body")[0].classList.toggle("dark")}')
-def set_interface_arguments(interface_mode, extensions, bool_active):
- modes = ["default", "notebook", "chat", "cai_chat"]
- cmd_list = vars(shared.args)
- bool_list = [k for k in cmd_list if type(cmd_list[k]) is bool and k not in modes]
-
+def set_interface_arguments(extensions, bool_active):
shared.args.extensions = extensions
- for k in modes[1:]:
- setattr(shared.args, k, False)
- if interface_mode != "default":
- setattr(shared.args, interface_mode, True)
+
+ bool_list = get_boolean_arguments()
+
for k in bool_list:
setattr(shared.args, k, False)
for k in bool_active:
setattr(shared.args, k, True)
shared.need_restart = True
+
+
+def get_boolean_arguments(active=False):
+ exclude = ["default", "notebook", "chat"]
+
+ cmd_list = vars(shared.args)
+ bool_list = sorted([k for k in cmd_list if type(cmd_list[k]) is bool and k not in exclude + ui.list_model_elements()])
+ bool_active = [k for k in bool_list if vars(shared.args)[k]]
+
+ if active:
+ return bool_active
+ else:
+ return bool_list
diff --git a/modules/utils.py b/modules/utils.py
index 011c71f1..6fa94730 100644
--- a/modules/utils.py
+++ b/modules/utils.py
@@ -9,7 +9,7 @@ from modules.logging_colors import logger
# Helper function to get multiple values from shared.gradio
def gradio(*keys):
- if len(keys) == 1 and type(keys[0]) is list:
+ if len(keys) == 1 and type(keys[0]) in [list, tuple]:
keys = keys[0]
return [shared.gradio[k] for k in keys]
diff --git a/server.py b/server.py
index b477d4c1..e86e3338 100644
--- a/server.py
+++ b/server.py
@@ -69,28 +69,28 @@ def create_interface():
# Force some events to be triggered on page load
shared.persistent_interface_state.update({
'loader': shared.args.loader or 'Transformers',
+ 'mode': shared.settings['mode'],
+ 'character_menu': shared.args.character or shared.settings['character'],
+ 'instruction_template': shared.settings['instruction_template']
})
- if shared.is_chat():
- shared.persistent_interface_state.update({
- 'mode': shared.settings['mode'],
- 'character_menu': shared.args.character or shared.settings['character'],
- 'instruction_template': shared.settings['instruction_template']
- })
- if Path("cache/pfp_character.png").exists():
- Path("cache/pfp_character.png").unlink()
+ if Path("cache/pfp_character.png").exists():
+ Path("cache/pfp_character.png").unlink()
# css/js strings
- css = ui.css if not shared.is_chat() else ui.css + ui.chat_css
- js = ui.main_js
+ css = ui.css
+ js = ui.js
css += apply_extensions('css')
js += apply_extensions('js')
- # The input elements for the generation functions
+ # Interface state elements
shared.input_elements = ui.list_interface_input_elements()
with gr.Blocks(css=css, analytics_enabled=False, title=title, theme=ui.theme) as shared.gradio['interface']:
+ # Interface state
+ shared.gradio['interface_state'] = gr.State({k: None for k in shared.input_elements})
+
# Audio notification
if Path("notification.mp3").exists():
shared.gradio['audio_notification'] = gr.Audio(interactive=False, value="notification.mp3", elem_id="audio_notification", visible=False)
@@ -102,12 +102,9 @@ def create_interface():
shared.gradio['temporary_text'] = gr.Textbox(visible=False)
# Text Generation tab
- if shared.is_chat():
- ui_chat.create_ui()
- elif shared.args.notebook:
- ui_notebook.create_ui()
- else:
- ui_default.create_ui()
+ ui_chat.create_ui()
+ ui_default.create_ui()
+ ui_notebook.create_ui()
ui_parameters.create_ui(shared.settings['preset']) # Parameters tab
ui_model_menu.create_ui() # Model tab
@@ -115,12 +112,9 @@ def create_interface():
ui_session.create_ui() # Session tab
# Generation events
- if shared.is_chat():
- ui_chat.create_event_handlers()
- elif shared.args.notebook:
- ui_notebook.create_event_handlers()
- else:
- ui_default.create_event_handlers()
+ ui_chat.create_event_handlers()
+ ui_default.create_event_handlers()
+ ui_notebook.create_event_handlers()
# Other events
ui_file_saving.create_event_handlers()
@@ -130,11 +124,10 @@ def create_interface():
# Interface launch events
if shared.settings['dark_theme']:
shared.gradio['interface'].load(lambda: None, None, None, _js="() => document.getElementsByTagName('body')[0].classList.add('dark')")
-
+
shared.gradio['interface'].load(lambda: None, None, None, _js=f"() => {{{js}}}")
shared.gradio['interface'].load(partial(ui.apply_interface_values, {}, use_persistent=True), None, gradio(ui.list_interface_input_elements()), show_progress=False)
- if shared.is_chat():
- shared.gradio['interface'].load(chat.redraw_html, shared.reload_inputs, gradio('display'))
+ shared.gradio['interface'].load(chat.redraw_html, gradio(ui_chat.reload_arr), gradio('display'))
extensions_module.create_extensions_tabs() # Extensions tabs
extensions_module.create_extensions_block() # Extensions block
@@ -190,16 +183,10 @@ if __name__ == "__main__":
# Activate the extensions listed on settings.yaml
extensions_module.available_extensions = utils.get_available_extensions()
- if shared.is_chat():
- for extension in shared.settings['chat_default_extensions']:
- shared.args.extensions = shared.args.extensions or []
- if extension not in shared.args.extensions:
- shared.args.extensions.append(extension)
- else:
- for extension in shared.settings['default_extensions']:
- shared.args.extensions = shared.args.extensions or []
- if extension not in shared.args.extensions:
- shared.args.extensions.append(extension)
+ for extension in shared.settings['default_extensions']:
+ shared.args.extensions = shared.args.extensions or []
+ if extension not in shared.args.extensions:
+ shared.args.extensions.append(extension)
available_models = utils.get_available_models()
diff --git a/settings-template.yaml b/settings-template.yaml
index a0c53b33..b1d63c71 100644
--- a/settings-template.yaml
+++ b/settings-template.yaml
@@ -13,7 +13,6 @@ context: This is a conversation with your Assistant. It is a computer program de
greeting: ''
turn_template: ''
custom_stopping_strings: ''
-stop_at_newline: false
add_bos_token: true
ban_eos_token: false
skip_special_tokens: true
@@ -28,11 +27,7 @@ chat-instruct_command: |-
Continue the chat dialogue below. Write a single reply for the character "<|character|>".
<|prompt|>
-chat_generation_attempts: 1
-chat_generation_attempts_min: 1
-chat_generation_attempts_max: 10
-default_extensions: []
-chat_default_extensions:
+default_extensions:
- gallery
preset: simple-1
prompt: QA