Mirror of https://github.com/oobabooga/text-generation-webui.git
Several QoL changes related to LoRA
commit 214dc6868e
parent 0cecfc684c
@@ -53,6 +53,10 @@ settings = {
         '^(gpt4chan|gpt-4chan|4chan)': '-----\n--- 865467536\nInput text\n--- 865467537\n',
         '(rosey|chip|joi)_.*_instruct.*': 'User: \n',
         'oasst-*': '<|prompter|>Write a story about future of AI development<|endoftext|><|assistant|>'
+    },
+    'lora_prompts': {
+        'default': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
+        'alpaca-lora-7b': "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n### Instruction:\nWrite a Python script that generates text using the transformers library.\n### Response:\n"
     }
 }
 
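Note: the new 'lora_prompts' table works like the existing 'presets' and 'prompts' tables, with keys treated as regular expressions matched against a lowercased name, here the LoRA name instead of the model name. A minimal sketch of that lookup, using an illustrative copy of the table and an illustrative LoRA name rather than the real shared module:

import re

# Illustrative copy of the new table; in the webui this lives in shared.settings['lora_prompts'].
lora_prompts = {
    'default': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
    'alpaca-lora-7b': "Below is an instruction that describes a task. "
                      "Write a response that appropriately completes the request.\n"
                      "### Instruction:\nWrite a Python script that generates text using the transformers library.\n"
                      "### Response:\n",
}

lora_name = 'alpaca-lora-7b'  # illustrative LoRA name

# First key whose regex matches the LoRA name wins; otherwise fall back to the 'default' entry.
key = next((k for k in lora_prompts if re.match(k.lower(), lora_name.lower())), 'default')
print(lora_prompts[key])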
@@ -68,6 +72,7 @@ def str2bool(v):
 
 parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog,max_help_position=54))
 parser.add_argument('--model', type=str, help='Name of the model to load by default.')
+parser.add_argument('--lora', type=str, help='Name of the LoRA to apply to the model by default.')
 parser.add_argument('--notebook', action='store_true', help='Launch the web UI in notebook mode, where the output is written to the same text box as the input.')
 parser.add_argument('--chat', action='store_true', help='Launch the web UI in chat mode.')
 parser.add_argument('--cai-chat', action='store_true', help='Launch the web UI in chat mode with a style similar to Character.AI\'s. If the file img_bot.png or img_bot.jpg exists in the same folder as server.py, this image will be used as the bot\'s profile picture. Similarly, img_me.png or img_me.jpg will be used as your profile picture.')
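The new --lora option mirrors --model: it is a plain string argument, so it shows up on the parsed namespace as args.lora and stays None when the flag is omitted. A small self-contained sketch of that behaviour, with a fresh parser and illustrative model and LoRA names (they are not bundled with the repo):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, help='Name of the model to load by default.')
parser.add_argument('--lora', type=str, help='Name of the LoRA to apply to the model by default.')

# Parse an example command line instead of sys.argv; the names are placeholders.
args = parser.parse_args(['--model', 'llama-7b', '--lora', 'alpaca-lora-7b'])
print(args.model, args.lora)  # llama-7b alpaca-lora-7b; args.lora would be None if --lora were omitted

In practice this means the server can be started with both flags at once, for example: python server.py --model llama-7b --lora alpaca-lora-7b (names illustrative).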
@@ -225,10 +225,16 @@ else:
         print()
     shared.model_name = available_models[i]
 shared.model, shared.tokenizer = load_model(shared.model_name)
+if shared.args.lora:
+    shared.lora_name = shared.args.lora
+    print(f"Adding the LoRA {shared.lora_name} to the model...")
+    add_lora_to_model(shared.lora_name)
 
 # Default UI settings
 default_preset = shared.settings['presets'][next((k for k in shared.settings['presets'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
-default_text = shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
+default_text = shared.settings['lora_prompts'][next((k for k in shared.settings['lora_prompts'] if re.match(k.lower(), shared.lora_name.lower())), 'default')]
+if default_text == '':
+    default_text = shared.settings['prompts'][next((k for k in shared.settings['prompts'] if re.match(k.lower(), shared.model_name.lower())), 'default')]
 title ='Text generation web UI'
 description = '\n\n# Text generation lab\nGenerate text using Large Language Models.\n'
 suffix = '_pygmalion' if 'pygmalion' in shared.model_name.lower() else ''
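Two behaviours are added in this hunk: when --lora is passed, the adapter is attached right after the base model loads, and the default prompt shown in the UI is now picked from 'lora_prompts' (keyed by the LoRA name) with a fallback to the model-keyed 'prompts' table whenever the matched LoRA prompt is an empty string. A minimal sketch of that selection logic follows; pick is a hypothetical helper, the settings dict and the model/LoRA names are stand-ins for shared.settings, shared.model_name and shared.lora_name, and the empty 'default' entry is chosen on purpose to exercise the fallback branch (the shipped default is not empty):

import re

def pick(table, name):
    # First key whose regex matches the (lowercased) name, else the 'default' entry.
    return table[next((k for k in table if re.match(k.lower(), name.lower())), 'default')]

# Illustrative stand-ins for shared.settings, shared.model_name and shared.lora_name.
settings = {
    'prompts': {'default': 'Common sense questions and answers\n\nQuestion: \nFactual answer:'},
    'lora_prompts': {'default': ''},  # empty on purpose, to trigger the fallback
}
model_name, lora_name = 'llama-7b', 'my-lora'

default_text = pick(settings['lora_prompts'], lora_name)
if default_text == '':
    # Empty LoRA prompt: fall back to the model-keyed prompt table.
    default_text = pick(settings['prompts'], model_name)
print(default_text)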
@@ -1,35 +1,38 @@
 {
     "max_new_tokens": 200,
     "max_new_tokens_min": 1,
     "max_new_tokens_max": 2000,
     "name1": "Person 1",
     "name2": "Person 2",
     "context": "This is a conversation between two people.",
     "stop_at_newline": true,
     "chat_prompt_size": 2048,
     "chat_prompt_size_min": 0,
     "chat_prompt_size_max": 2048,
     "chat_generation_attempts": 1,
     "chat_generation_attempts_min": 1,
     "chat_generation_attempts_max": 5,
     "name1_pygmalion": "You",
     "name2_pygmalion": "Kawaii",
     "context_pygmalion": "Kawaii's persona: Kawaii is a cheerful person who loves to make others smile. She is an optimist who loves to spread happiness and positivity wherever she goes.\n<START>",
     "stop_at_newline_pygmalion": false,
     "default_extensions": [],
     "chat_default_extensions": [
         "gallery"
     ],
     "presets": {
         "default": "NovelAI-Sphinx Moth",
         "pygmalion-*": "Pygmalion",
-        "RWKV-*": "Naive",
-        "(rosey|chip|joi)_.*_instruct.*": "Instruct Joi (Contrastive Search)"
+        "RWKV-*": "Naive"
     },
     "prompts": {
         "default": "Common sense questions and answers\n\nQuestion: \nFactual answer:",
         "^(gpt4chan|gpt-4chan|4chan)": "-----\n--- 865467536\nInput text\n--- 865467537\n",
         "(rosey|chip|joi)_.*_instruct.*": "User: \n",
         "oasst-*": "<|prompter|>Write a story about future of AI development<|endoftext|><|assistant|>"
+    },
+    "lora_prompts": {
+        "default": "Common sense questions and answers\n\nQuestion: \nFactual answer:",
+        "alpaca-lora-7b": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n### Instruction:\nWrite a Python script that generates text using the transformers library.\n### Response:\n"
+    }
     }
 }
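This JSON file (which appears to be settings-template.json in the repository) mirrors the Python defaults above, so a user copy of it can set its own 'lora_prompts' entries without touching the code. A rough sketch of how such a file could be layered over the defaults; the file name settings.json is an assumption and the merge shown is a plain top-level override, not necessarily the webui's exact loading code:

import json
from pathlib import Path

# Defaults as defined in the Python settings dict; trimmed to the relevant key.
settings = {
    'lora_prompts': {
        'default': 'Common sense questions and answers\n\nQuestion: \nFactual answer:',
    }
}

# Hypothetical user copy of the template; each top-level key simply replaces the default.
user_file = Path('settings.json')
if user_file.exists():
    for key, value in json.loads(user_file.read_text()).items():
        settings[key] = value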