Mirror of https://github.com/oobabooga/text-generation-webui.git (synced 2024-11-21 23:57:58 +01:00)

Commit 55755e27b9 (parent 1cb9246160)

Don't hardcode prompts in the settings dict/json
modules/shared.py
@@ -41,19 +41,18 @@ settings = {
     'chat_default_extensions': ["gallery"],
     'presets': {
         'default': 'NovelAI-Sphinx Moth',
-        'pygmalion-*': 'Pygmalion',
-        'RWKV-*': 'Naive',
+        '.*pygmalion': 'Pygmalion',
+        '.*RWKV': 'Naive',
     },
     'prompts': {
-        'default': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
-        '^(gpt4chan|gpt-4chan|4chan)': '-----\n--- 865467536\nInput text\n--- 865467537\n',
-        '(rosey|chip|joi)_.*_instruct.*': 'User: \n',
-        'oasst-*': '<|prompter|>Write a story about future of AI development<|endoftext|><|assistant|>',
-        'alpaca-*': "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n### Instruction:\nWrite a poem about the transformers Python library. \nMention the word \"large language models\" in that poem.\n### Response:\n",
+        'default': 'QA',
+        '.*(gpt4chan|gpt-4chan|4chan)': 'GPT-4chan',
+        '.*oasst': 'Open Assistant',
+        '.*alpaca': "Alpaca",
     },
     'lora_prompts': {
-        'default': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
-        '(alpaca-lora-7b|alpaca-lora-13b|alpaca-lora-30b)': "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n### Instruction:\nWrite a poem about the transformers Python library. \nMention the word \"large language models\" in that poem.\n### Response:\n"
+        'default': 'QA',
+        '.*(alpaca-lora-7b|alpaca-lora-13b|alpaca-lora-30b)': "Alpaca",
     }
 }
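Note on the new key style: these keys are regular expressions consumed by re.match in server.py, not shell globs. re.match anchors at the start of the string, and in a regex `-*` means "zero or more hyphens", so the old `pygmalion-*` key only matched names that literally begin with "pygmalion". The new `.*pygmalion` form matches the substring anywhere in the lowercased model name. A quick illustration (the model name is made up for the example):

    import re

    name = 'tehvenom_pygmalion-7b'.lower()

    # Old key: re.match anchors at position 0, so a prefixed name never matches.
    print(bool(re.match('pygmalion-*', name)))   # False
    # New key: the leading .* lets the match begin anywhere in the name.
    print(bool(re.match('.*pygmalion', name)))   # True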
prompts/GPT-4chan.txt (new file, 6 lines)
@@ -0,0 +1,6 @@
+-----
+--- 865467536
+Hello, AI frens!
+How are you doing on this fine day?
+--- 865467537
+
server.py (16 lines changed)
@@ -73,9 +73,7 @@ def load_model_wrapper(selected_model):

 def load_lora_wrapper(selected_lora):
     add_lora_to_model(selected_lora)
-    default_text = shared.settings['lora_prompts'][next((k for k in shared.settings['lora_prompts'] if re.match(k.lower(), shared.lora_name.lower())), 'default')]
-
-    return selected_lora, default_text
+    return selected_lora

 def load_preset_values(preset_menu, return_dict=False):
     generate_params = {
@@ -141,7 +139,10 @@ def load_prompt(fname):
         return ''
     else:
         with open(Path(f'prompts/{fname}.txt'), 'r', encoding='utf-8') as f:
-            return f.read()
+            text = f.read()
+            if text[-1] == '\n':
+                text = text[:-1]
+            return text

 def create_prompt_menus():
     with gr.Row():
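The load_prompt change strips a single trailing newline, so a prompt file saved with a final newline no longer injects a blank line at the end of the textbox. A small self-contained demonstration of the check (the file name and contents are made up):

    from pathlib import Path

    Path('prompts').mkdir(exist_ok=True)
    # A hypothetical prompt file saved with a trailing newline:
    Path('prompts/example.txt').write_text('Question: \nFactual answer:\n', encoding='utf-8')

    text = Path('prompts/example.txt').read_text(encoding='utf-8')
    if text[-1] == '\n':  # same check as the diff; assumes a non-empty file
        text = text[:-1]
    assert text == 'Question: \nFactual answer:'

One caveat: text[-1] raises an IndexError on an empty prompt file; text.endswith('\n') would be the safer check.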
@@ -212,7 +213,7 @@ def create_settings_menus(default_preset):

     shared.gradio['model_menu'].change(load_model_wrapper, [shared.gradio['model_menu']], [shared.gradio['model_menu']], show_progress=True)
     shared.gradio['preset_menu'].change(load_preset_values, [shared.gradio['preset_menu']], [shared.gradio[k] for k in ['do_sample', 'temperature', 'top_p', 'typical_p', 'repetition_penalty', 'encoder_repetition_penalty', 'top_k', 'min_length', 'no_repeat_ngram_size', 'num_beams', 'penalty_alpha', 'length_penalty', 'early_stopping']])
-    shared.gradio['lora_menu'].change(load_lora_wrapper, [shared.gradio['lora_menu']], [shared.gradio['lora_menu'], shared.gradio['textbox']], show_progress=True)
+    shared.gradio['lora_menu'].change(load_lora_wrapper, [shared.gradio['lora_menu']], [shared.gradio['lora_menu']], show_progress=True)
     shared.gradio['softprompts_menu'].change(load_soft_prompt, [shared.gradio['softprompts_menu']], [shared.gradio['softprompts_menu']], show_progress=True)
     shared.gradio['upload_softprompt'].upload(upload_soft_prompt, [shared.gradio['upload_softprompt']], [shared.gradio['softprompts_menu']])
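The event-wiring change above follows from the new return signature: a Gradio .change callback must return exactly as many values as there are components in its outputs list, and load_lora_wrapper now returns only selected_lora, so shared.gradio['textbox'] is dropped from the outputs. A toy sketch of that contract (component names are illustrative, not from the codebase):

    import gradio as gr

    def on_change(choice):
        # One returned value -> exactly one output component below.
        return choice

    with gr.Blocks() as demo:
        menu = gr.Dropdown(choices=['a', 'b'], value='a')
        # Listing two outputs here would require returning two values.
        menu.change(on_change, [menu], [menu])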
@@ -277,11 +278,10 @@ if shared.args.lora:

 # Default UI settings
 default_preset = shared.settings['presets'][next((k for k in shared.settings['presets'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
 if shared.lora_name != "None":
-    default_text = shared.settings['lora_prompts'][next((k for k in shared.settings['lora_prompts'] if re.match(k.lower(), shared.lora_name.lower())), 'default')]
+    default_text = load_prompt(shared.settings['lora_prompts'][next((k for k in shared.settings['lora_prompts'] if re.match(k.lower(), shared.lora_name.lower())), 'default')])
 else:
-    default_text = shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
+    default_text = load_prompt(shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')])
 title ='Text generation web UI'
-description = '\n\n# Text generation lab\nGenerate text using Large Language Models.\n'

 def create_interface():
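With this hunk the startup default text is resolved in two steps: the regex-keyed settings map a model (or LoRA) name to a prompt name, and load_prompt turns that name into the contents of prompts/<name>.txt. A condensed sketch of the combined flow, using the dict from this commit (the model name is made up, and the guard condition in load_prompt is assumed; the diff only shows its return ''):

    import re
    from pathlib import Path

    settings = {'prompts': {'default': 'QA', '.*alpaca': 'Alpaca'}}

    def load_prompt(fname):
        # Condensed version of server.py's load_prompt after this commit.
        if fname in ['None', '']:
            return ''
        with open(Path(f'prompts/{fname}.txt'), 'r', encoding='utf-8') as f:
            text = f.read()
            if text[-1] == '\n':
                text = text[:-1]
            return text

    model_name = 'chavinlo_alpaca-native'
    prompt_name = settings['prompts'][next((k for k in settings['prompts']
                                            if re.match(k.lower(), model_name.lower())), 'default')]
    default_text = load_prompt(prompt_name)  # would read prompts/Alpaca.txt (present in the repo)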
settings-template.json
@@ -18,18 +18,17 @@
     ],
     "presets": {
         "default": "NovelAI-Sphinx Moth",
-        "pygmalion-*": "Pygmalion",
-        "RWKV-*": "Naive"
+        ".*pygmalion": "Pygmalion",
+        ".*RWKV": "Naive"
     },
     "prompts": {
-        "default": "Common sense questions and answers\n\nQuestion: \nFactual answer:",
-        "^(gpt4chan|gpt-4chan|4chan)": "-----\n--- 865467536\nInput text\n--- 865467537\n",
-        "(rosey|chip|joi)_.*_instruct.*": "User: \n",
-        "oasst-*": "<|prompter|>Write a story about future of AI development<|endoftext|><|assistant|>",
-        "alpaca-*": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n### Instruction:\nWrite a poem about the transformers Python library. \nMention the word \"large language models\" in that poem.\n### Response:\n"
+        "default": "QA",
+        ".*(gpt4chan|gpt-4chan|4chan)": "GPT-4chan",
+        ".*oasst": "Open Assistant",
+        ".*alpaca": "Alpaca"
     },
     "lora_prompts": {
-        "default": "Common sense questions and answers\n\nQuestion: \nFactual answer:",
-        "(alpaca-lora-7b|alpaca-lora-13b|alpaca-lora-30b)": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n### Instruction:\nWrite a poem about the transformers Python library. \nMention the word \"large language models\" in that poem.\n### Response:\n"
+        "default": "QA",
+        ".*(alpaca-lora-7b|alpaca-lora-13b|alpaca-lora-30b)": "Alpaca"
     }
 }
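The JSON template mirrors the Python defaults; at startup a user-supplied settings file overrides the built-in shared.settings entries key by key. A minimal sketch of that overlay, assuming the usual json-over-defaults pattern (the file name reflects the template's intended use, not this diff):

    import json
    from pathlib import Path

    defaults = {'prompts': {'default': 'QA'}}  # stand-in for shared.settings

    # Overlay user settings on the defaults, key by key.
    settings_file = Path('settings.json')
    if settings_file.exists():
        user_settings = json.loads(settings_file.read_text(encoding='utf-8'))
        for key, value in user_settings.items():
            defaults[key] = value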