Add "save defaults to settings.yaml" button (#3574)
parent a95e6f02cb
commit 619cb4e78b
@@ -196,7 +196,6 @@ Optionally, you can use the following command-line flags:
 | `--model-dir MODEL_DIR` | Path to directory with all the models. |
 | `--lora-dir LORA_DIR` | Path to directory with all the loras. |
 | `--model-menu` | Show a model menu in the terminal when the web UI is first launched. |
-| `--no-stream` | Don't stream the text output in real time. |
 | `--settings SETTINGS_FILE` | Load the default interface settings from this yaml file. See `settings-template.yaml` for an example. If you create a file called `settings.yaml`, this file will be loaded by default without the need to use the `--settings` flag. |
 | `--extensions EXTENSIONS [EXTENSIONS ...]` | The list of extensions to load. If you want to load more than one extension, write the names separated by spaces. |
 | `--verbose` | Print the prompts to the terminal. |

@@ -261,7 +261,7 @@ def chatbot_wrapper(text, state, regenerate=False, _continue=False, loading_mess
     yield output


-def impersonate_wrapper(text, start_with, state):
+def impersonate_wrapper(text, state):
     if shared.model_name == 'None' or shared.model is None:
         logger.error("No model is loaded! Select one in the Model tab.")
         yield ''

@@ -291,15 +291,15 @@ def generate_chat_reply(text, state, regenerate=False, _continue=False, loading_


 # Same as above but returns HTML for the UI
-def generate_chat_reply_wrapper(text, start_with, state, regenerate=False, _continue=False):
-    if start_with != '' and not _continue:
+def generate_chat_reply_wrapper(text, state, regenerate=False, _continue=False):
+    if state['start_with'] != '' and not _continue:
         if regenerate:
             text, state['history'] = remove_last_message(state['history'])
             regenerate = False

         _continue = True
         send_dummy_message(text, state)
-        send_dummy_reply(start_with, state)
+        send_dummy_reply(state['start_with'], state)

     for i, history in enumerate(generate_chat_reply(text, state, regenerate, _continue, loading_message=True)):
         yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style']), history

@@ -29,34 +29,35 @@ session_is_loading = False
 # UI defaults
 settings = {
     'dark_theme': True,
-    'autoload_model': False,
+    'start_with': '',
+    'mode': 'chat',
+    'chat_style': 'TheEncrypted777',
+    'character': 'None',
+    'prompt-default': 'QA',
+    'prompt-notebook': 'QA',
+    'preset': 'simple-1',
     'max_new_tokens': 200,
     'max_new_tokens_min': 1,
     'max_new_tokens_max': 4096,
-    'auto_max_new_tokens': False,
     'seed': -1,
     'negative_prompt': '',
-    'character': 'None',
+    'truncation_length': 2048,
+    'truncation_length_min': 0,
+    'truncation_length_max': 16384,
+    'custom_stopping_strings': '',
+    'auto_max_new_tokens': False,
+    'ban_eos_token': False,
+    'add_bos_token': True,
+    'skip_special_tokens': True,
+    'stream': True,
     'name1': 'You',
     'name2': 'Assistant',
     'context': 'This is a conversation with your Assistant. It is a computer program designed to help you with various tasks such as answering questions, providing recommendations, and helping with decision making. You can ask it anything you want and it will do its best to give you accurate and relevant information.',
     'greeting': '',
-    'turn_template': '',
-    'custom_stopping_strings': '',
-    'add_bos_token': True,
-    'ban_eos_token': False,
-    'skip_special_tokens': True,
-    'truncation_length': 2048,
-    'truncation_length_min': 0,
-    'truncation_length_max': 16384,
-    'mode': 'chat',
-    'start_with': '',
-    'chat_style': 'TheEncrypted777',
     'instruction_template': 'None',
     'chat-instruct_command': 'Continue the chat dialogue below. Write a single reply for the character "<|character|>".\n\n<|prompt|>',
+    'autoload_model': False,
     'default_extensions': ['gallery'],
-    'preset': 'simple-1',
-    'prompt': 'QA',
 }

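For context, these defaults can be overridden by a user-supplied settings.yaml. The snippet below is an illustrative sketch only, not the project's actual startup code; the keys shown are just a subset of the dict above.

# Illustrative sketch: merging a user settings.yaml over defaults like the ones above.
from pathlib import Path

import yaml

defaults = {'dark_theme': True, 'preset': 'simple-1', 'prompt-default': 'QA'}  # subset of the dict above

settings_file = Path('settings.yaml')
if settings_file.exists():
    with open(settings_file) as f:
        user_settings = yaml.safe_load(f) or {}
    defaults.update(user_settings)  # user-provided values take precedence over the defaults
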
@@ -83,7 +84,7 @@ parser.add_argument('--lora', type=str, nargs="+", help='The list of LoRAs to lo
 parser.add_argument("--model-dir", type=str, default='models/', help="Path to directory with all the models")
 parser.add_argument("--lora-dir", type=str, default='loras/', help="Path to directory with all the loras")
 parser.add_argument('--model-menu', action='store_true', help='Show a model menu in the terminal when the web UI is first launched.')
-parser.add_argument('--no-stream', action='store_true', help='Don\'t stream the text output in real time.')
+parser.add_argument('--no-stream', action='store_true', help='DEPRECATED')
 parser.add_argument('--settings', type=str, help='Load the default interface settings from this yaml file. See settings-template.yaml for an example. If you create a file called settings.yaml, this file will be loaded by default without the need to use the --settings flag.')
 parser.add_argument('--extensions', type=str, nargs="+", help='The list of extensions to load. If you want to load more than one extension, write the names separated by spaces.')
 parser.add_argument('--verbose', action='store_true', help='Print the prompts to the terminal.')

@@ -181,7 +182,7 @@ args = parser.parse_args()
 args_defaults = parser.parse_args([])

 # Deprecation warnings
-for k in ['chat', 'notebook']:
+for k in ['chat', 'notebook', 'no_stream']:
     if getattr(args, k):
         logger.warning(f'--{k} has been deprecated and will be removed soon. Please remove that flag.')

@@ -1,7 +1,9 @@
+import copy
 from pathlib import Path

 import gradio as gr
 import torch
+import yaml

 from modules import shared

@@ -119,6 +121,7 @@ def list_interface_input_elements():
     # Chat elements
     elements += [
         'textbox',
+        'start_with',
         'character_menu',
         'history',
         'name1',

@@ -139,7 +142,9 @@ def list_interface_input_elements():
     elements += [
         'textbox-notebook',
         'textbox-default',
-        'output_textbox'
+        'output_textbox',
+        'prompt_menu-default',
+        'prompt_menu-notebook',
     ]

     # Model elements

@@ -170,6 +175,24 @@ def apply_interface_values(state, use_persistent=False):
     return [state[k] if k in state else gr.update() for k in elements]


+def save_settings(state, preset, instruction_template, extensions):
+    output = copy.deepcopy(shared.settings)
+    exclude = ['name1', 'name2', 'greeting', 'context', 'turn_template']
+    for k in state:
+        if k in shared.settings and k not in exclude:
+            output[k] = state[k]
+
+    output['preset'] = preset
+    output['prompt-default'] = state['prompt_menu-default']
+    output['prompt-notebook'] = state['prompt_menu-notebook']
+    output['character'] = state['character_menu']
+    output['instruction_template'] = instruction_template
+    output['default_extensions'] = extensions
+    output['seed'] = int(output['seed'])
+
+    return yaml.dump(output, sort_keys=False, width=float("inf"))
+
+
 class ToolButton(gr.Button, gr.components.IOComponent):
     """
     Small button with single emoji as text, fits inside gradio forms

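Note that save_settings() returns the YAML text rather than writing a file; the write is handled by the UI's file saver. Below is a standalone sketch of the same yaml.dump options, where output is a hypothetical stand-in for the dict save_settings() assembles, not the function's real input.

import yaml

# Hypothetical stand-in for the dict that save_settings() builds from shared.settings.
output = {'dark_theme': True, 'preset': 'simple-1', 'seed': -1, 'custom_stopping_strings': ''}

# sort_keys=False keeps insertion order; width=float("inf") prevents wrapping of long values.
text = yaml.dump(output, sort_keys=False, width=float("inf"))
assert yaml.safe_load(text) == output  # the dump round-trips cleanly

with open('settings.yaml', 'w') as f:
    f.write(text)
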
@@ -10,7 +10,7 @@ from modules.html_generator import chat_html_wrapper
 from modules.text_generation import stop_everything_event
 from modules.utils import gradio

-inputs = ('Chat input', 'start_with', 'interface_state')
+inputs = ('Chat input', 'interface_state')
 reload_arr = ('history', 'name1', 'name2', 'mode', 'chat_style')
 clear_arr = ('Clear history-confirm', 'Clear history', 'Clear history-cancel')

@@ -82,7 +82,7 @@ def create_chat_settings_ui():
                 shared.gradio['name1_instruct'] = gr.Textbox(value='', lines=2, label='User string')
                 shared.gradio['name2_instruct'] = gr.Textbox(value='', lines=1, label='Bot string')
                 shared.gradio['context_instruct'] = gr.Textbox(value='', lines=4, label='Context')
-                shared.gradio['turn_template'] = gr.Textbox(value=shared.settings['turn_template'], lines=1, label='Turn template', info='Used to precisely define the placement of spaces and new line characters in instruction prompts.')
+                shared.gradio['turn_template'] = gr.Textbox(value='', lines=1, label='Turn template', info='Used to precisely define the placement of spaces and new line characters in instruction prompts.')
                 with gr.Row():
                     shared.gradio['send_instruction_to_default'] = gr.Button('Send to default', elem_classes=['small-button'])
                     shared.gradio['send_instruction_to_notebook'] = gr.Button('Send to notebook', elem_classes=['small-button'])

@@ -13,13 +13,11 @@ outputs = ('output_textbox', 'html-default')


 def create_ui():
-    default_text = load_prompt(shared.settings['prompt'])
-
     with gr.Tab('Default', elem_id='default-tab'):
         shared.gradio['last_input-default'] = gr.State('')
         with gr.Row():
             with gr.Column():
-                shared.gradio['textbox-default'] = gr.Textbox(value=default_text, elem_classes=['textbox_default', 'add_scrollbar'], lines=27, label='Input')
+                shared.gradio['textbox-default'] = gr.Textbox(value='', elem_classes=['textbox_default', 'add_scrollbar'], lines=27, label='Input')
                 with gr.Row():
                     shared.gradio['Generate-default'] = gr.Button('Generate', variant='primary')
                     shared.gradio['Stop-default'] = gr.Button('Stop', elem_id='stop')

@@ -13,14 +13,12 @@ outputs = ('textbox-notebook', 'html-notebook')


 def create_ui():
-    default_text = load_prompt(shared.settings['prompt'])
-
     with gr.Tab('Notebook', elem_id='notebook-tab'):
         shared.gradio['last_input-notebook'] = gr.State('')
         with gr.Row():
             with gr.Column(scale=4):
                 with gr.Tab('Raw'):
-                    shared.gradio['textbox-notebook'] = gr.Textbox(value=default_text, elem_classes=['textbox', 'add_scrollbar'], lines=27)
+                    shared.gradio['textbox-notebook'] = gr.Textbox(value='', elem_classes=['textbox', 'add_scrollbar'], lines=27)

                 with gr.Tab('Markdown'):
                     shared.gradio['markdown_render-notebook'] = gr.Button('Render')

@@ -121,7 +121,7 @@ def create_ui(default_preset):
                 shared.gradio['add_bos_token'] = gr.Checkbox(value=shared.settings['add_bos_token'], label='Add the bos_token to the beginning of prompts', info='Disabling this can make the replies more creative.')

                 shared.gradio['skip_special_tokens'] = gr.Checkbox(value=shared.settings['skip_special_tokens'], label='Skip special tokens', info='Some specific models need this unset.')
-                shared.gradio['stream'] = gr.Checkbox(value=not shared.args.no_stream, label='Activate text streaming')
+                shared.gradio['stream'] = gr.Checkbox(value=shared.settings['stream'], label='Activate text streaming')

     ui_chat.create_chat_settings_ui()

@@ -9,8 +9,10 @@ def create_ui():
     with gr.Tab("Session", elem_id="session-tab"):
         with gr.Row():
             with gr.Column():
-                shared.gradio['reset_interface'] = gr.Button("Apply and restart")
-                shared.gradio['toggle_dark_mode'] = gr.Button('Toggle 💡')
+                shared.gradio['reset_interface'] = gr.Button("Apply flags/extensions and restart")
+                with gr.Row():
+                    shared.gradio['toggle_dark_mode'] = gr.Button('Toggle 💡')
+                    shared.gradio['save_settings'] = gr.Button('Save UI defaults to settings.yaml')

                 with gr.Row():
                     with gr.Column():

@@ -37,6 +39,12 @@ def create_ui():
         lambda: None, None, None, _js='() => {document.body.innerHTML=\'<h1 style="font-family:monospace;padding-top:20%;margin:0;height:100vh;color:lightgray;text-align:center;background:var(--body-background-fill)">Reloading...</h1>\'; setTimeout(function(){location.reload()},2500); return []}')

     shared.gradio['toggle_dark_mode'].click(lambda: None, None, None, _js='() => {document.getElementsByTagName("body")[0].classList.toggle("dark")}')
+    shared.gradio['save_settings'].click(
+        ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
+        ui.save_settings, gradio('interface_state', 'preset_menu', 'instruction_template', 'extensions_menu'), gradio('save_contents')).then(
+        lambda: './', None, gradio('save_root')).then(
+        lambda: 'settings.yaml', None, gradio('save_filename')).then(
+        lambda: gr.update(visible=True), None, gradio('file_saver'))


 def set_interface_arguments(extensions, bool_active):

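The button wiring above chains several steps with .click(...).then(...). Below is a minimal, self-contained sketch of that Gradio pattern; it is illustrative only, and the component names and lambdas are made up rather than taken from the webui.

import gradio as gr

with gr.Blocks() as demo:
    yaml_text = gr.State('')
    save_button = gr.Button('Save UI defaults to settings.yaml')
    preview = gr.Textbox(label='Generated YAML')

    # Each .then() step runs after the previous one finishes, mirroring the
    # gather-state -> build-YAML -> show-file-saver chain in the diff above.
    save_button.click(
        lambda: 'dark_theme: true\n', None, yaml_text).then(
        lambda text: text, yaml_text, preview)

demo.launch()
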
@@ -71,7 +71,9 @@ def create_interface():
         'loader': shared.args.loader or 'Transformers',
         'mode': shared.settings['mode'],
         'character_menu': shared.args.character or shared.settings['character'],
-        'instruction_template': shared.settings['instruction_template']
+        'instruction_template': shared.settings['instruction_template'],
+        'prompt_menu-default': shared.settings['prompt-default'],
+        'prompt_menu-notebook': shared.settings['prompt-notebook'],
     })

     if Path("cache/pfp_character.png").exists():

@@ -1,33 +1,34 @@
 dark_theme: true
-autoload_model: false
+start_with: ''
+mode: chat
+chat_style: TheEncrypted777
+character: None
+prompt-default: QA
+prompt-notebook: QA
+preset: simple-1
 max_new_tokens: 200
 max_new_tokens_min: 1
 max_new_tokens_max: 4096
-auto_max_new_tokens: false
 seed: -1
 negative_prompt: ''
-character: None
+truncation_length: 2048
+truncation_length_min: 0
+truncation_length_max: 16384
+custom_stopping_strings: ''
+auto_max_new_tokens: false
+ban_eos_token: false
+add_bos_token: true
+skip_special_tokens: true
+stream: true
 name1: You
 name2: Assistant
 context: This is a conversation with your Assistant. It is a computer program designed to help you with various tasks such as answering questions, providing recommendations, and helping with decision making. You can ask it anything you want and it will do its best to give you accurate and relevant information.
 greeting: ''
-turn_template: ''
-custom_stopping_strings: ''
-add_bos_token: true
-ban_eos_token: false
-skip_special_tokens: true
-truncation_length: 2048
-truncation_length_min: 0
-truncation_length_max: 16384
-mode: chat
-start_with: ''
-chat_style: TheEncrypted777
 instruction_template: None
 chat-instruct_command: |-
   Continue the chat dialogue below. Write a single reply for the character "<|character|>".

   <|prompt|>
+autoload_model: false
 default_extensions:
 - gallery
-preset: simple-1
-prompt: QA