text-generation-webui/settings-template.json

{
"max_new_tokens": 200,
"max_new_tokens_min": 1,
"max_new_tokens_max": 2000,
"seed": -1,
"name1": "You",
"name2": "Assistant",
"context": "This is a conversation with your Assistant. The Assistant is very helpful and is eager to chat with you and answer your questions.",
"greeting": "Hello there!",
"end_of_turn": "",
"stop_at_newline": false,
"add_bos_token": true,
"chat_prompt_size": 2048,
"chat_prompt_size_min": 0,
"chat_prompt_size_max": 2048,
"chat_generation_attempts": 1,
"chat_generation_attempts_min": 1,
"chat_generation_attempts_max": 5,
"default_extensions": [],
"chat_default_extensions": [
"gallery"
],
"presets": {
"default": "Default",
".*(alpaca|llama)": "LLaMA-Precise",
".*pygmalion": "NovelAI-Storywriter",
".*RWKV": "Naive"
    },
    "prompts": {
        "default": "QA",
        ".*(gpt4chan|gpt-4chan|4chan)": "GPT-4chan",
        ".*oasst": "Open Assistant",
        ".*alpaca": "Alpaca"
    },
    "lora_prompts": {
        "default": "QA",
        ".*(alpaca-lora-7b|alpaca-lora-13b|alpaca-lora-30b)": "Alpaca"
    }
}
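
For reference, the regex-style keys under "presets" (and likewise "prompts" and "lora_prompts") are matched against the loaded model's name, with the "default" entry as the fallback. The following is a minimal sketch of that lookup, not the webui's actual loader; the file name passed to open() and the case-insensitive matching are assumptions made for illustration.

import json
import re

# Load a settings file shaped like this template (filename is assumed).
with open("settings-template.json") as f:
    settings = json.load(f)

def pick_preset(model_name: str) -> str:
    # Try each non-default key as a regex against the model name
    # (case-insensitively in this sketch); fall back to "default".
    for pattern, preset in settings["presets"].items():
        if pattern != "default" and re.match(pattern, model_name, re.IGNORECASE):
            return preset
    return settings["presets"]["default"]

print(pick_preset("llama-7b"))      # LLaMA-Precise
print(pick_preset("rwkv-4-raven"))  # Naive
print(pick_preset("gpt-j-6b"))      # Default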