From 334486f527bc97f61eb3264def4e03a0dab9b369 Mon Sep 17 00:00:00 2001
From: minipasila
Date: Wed, 10 May 2023 04:29:22 +0300
Subject: [PATCH] Added instruct-following template for Metharme (#1679)

---
 characters/instruction-following/Metharme.yaml | 4 ++++
 models/config.yaml                             | 3 +++
 modules/GPTQ_loader.py                         | 4 ++--
 modules/shared.py                              | 1 +
 prompts/Metharme.txt                           | 5 +++++
 settings-template.json                         | 8 +++++---
 6 files changed, 20 insertions(+), 5 deletions(-)
 create mode 100644 characters/instruction-following/Metharme.yaml
 create mode 100644 prompts/Metharme.txt

diff --git a/characters/instruction-following/Metharme.yaml b/characters/instruction-following/Metharme.yaml
new file mode 100644
index 00000000..79637331
--- /dev/null
+++ b/characters/instruction-following/Metharme.yaml
@@ -0,0 +1,4 @@
+name: "<|model|>"
+your_name: "<|user|>"
+context: "<|system|>"
+turn_template: "<|user|><|user-message|><|bot|><|bot-message|>"
diff --git a/models/config.yaml b/models/config.yaml
index e73349f9..806206f2 100644
--- a/models/config.yaml
+++ b/models/config.yaml
@@ -74,6 +74,9 @@
 .*chatglm:
   mode: 'instruct'
   instruction_template: 'ChatGLM'
+.*metharme:
+  mode: 'instruct'
+  instruction_template: 'Metharme'
 .*llava:
   mode: 'instruct'
   model_type: 'llama'
diff --git a/modules/GPTQ_loader.py b/modules/GPTQ_loader.py
index 3b138f62..5faec390 100644
--- a/modules/GPTQ_loader.py
+++ b/modules/GPTQ_loader.py
@@ -147,9 +147,9 @@ def load_quantized(model_name):
         name = model_name.lower()
         if any((k in name for k in ['opt-', 'opt_', 'opt1', 'opt3', 'optfor', 'galactica', 'galpaca', 'pygmalion-350m'])):
             model_type = 'opt'
-        elif any((k in name for k in ['gpt-j', 'gptj', 'gpt4all-j', 'malion-6b', 'pygway'])):
+        elif any((k in name for k in ['gpt-j', 'gptj', 'gpt4all-j', 'malion-6b', 'pygway', 'pygmalion-6b'])):
             model_type = 'gptj'
-        elif any((k in name for k in ['llama', 'alpac', 'vicuna', 'guanaco', 'koala', 'llava', 'wizardlm'])):
+        elif any((k in name for k in ['llama', 'alpac', 'vicuna', 'guanaco', 'koala', 'llava', 'wizardlm', 'metharme'])):
             model_type = 'llama'
         else:
             logging.error("Can't determine model type from model name. Please specify it manually using --model_type argument")
diff --git a/modules/shared.py b/modules/shared.py
index 2b1187ab..6e5404f8 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -78,6 +78,7 @@ settings = {
         ".*vicuna.*v0": "Vicuna v0",
         ".*vicuna.*(1.1|1_1)": "Vicuna v1.1",
         ".*stable.*vicuna": "StableVicuna",
+        ".*metharme": "Metharme",
         ".*guanaco": "Guanaco-Chat",
         ".*koala": "Koala",
         ".*stablelm-tuned": "StableLM",
diff --git a/prompts/Metharme.txt b/prompts/Metharme.txt
new file mode 100644
index 00000000..fc4a4ea4
--- /dev/null
+++ b/prompts/Metharme.txt
@@ -0,0 +1,5 @@
+<|system|>This is a text adventure game. Describe the scenario to the user and give him three options to pick from on each turn.<|user|>Start!<|model|>You are standing in front of an old, abandoned house. The windows are boarded up, and there's no sign of life around it. As you approach, you notice a strange feeling emanating from within. Suddenly, you hear a voice calling out to you... 'Come inside!'
+
+- Go inside the house.
+- Ignore the call and move away.
+- Run as fast as you can.<|user|>go inside<|model|>
\ No newline at end of file
diff --git a/settings-template.json b/settings-template.json
index 4058f339..52eb9e7b 100644
--- a/settings-template.json
+++ b/settings-template.json
@@ -35,17 +35,20 @@
         "default": "Default",
         ".*(alpaca|llama|llava)": "LLaMA-Precise",
         ".*pygmalion": "NovelAI-Storywriter",
-        ".*RWKV": "Naive"
+        ".*RWKV": "Naive",
+        ".*moss": "MOSS"
     },
     "prompts": {
         "default": "QA",
         ".*(gpt4chan|gpt-4chan|4chan)": "GPT-4chan",
         ".*(oasst|stablelm-7b-sft-v7-epoch-3)": "Open Assistant",
         ".*(alpac|dolly)": "Alpaca",
+        ".*mpt-.*instruct": "Alpaca",
         "(?!.*v0)(?!.*1.1)(?!.*1_1)(?!.*stable).*vicuna": "Vicuna v0",
         ".*vicuna.*v0": "Vicuna v0",
         ".*vicuna.*(1.1|1_1)": "Vicuna v1.1",
         ".*stable.*vicuna": "StableVicuna",
+        ".*metharme": "Metharme",
         ".*guanaco": "Guanaco-Chat",
         ".*koala": "Koala",
         ".*stablelm-tuned": "StableLM",
@@ -54,7 +57,6 @@
         ".*galactica.*-v2": "Galactica v2",
         "(?!.*finetuned)(?!.*-v2).*galactica": "Galactica",
         ".*baize": "Baize",
-        ".*mpt-.*instruct": "Alpaca",
         ".*mpt-.*chat": "MPT-Chat",
         "(?!.*-flan-)(?!.*-t5-).*lamini-": "Alpaca",
         ".*incite.*chat": "INCITE-Chat",
@@ -62,7 +64,7 @@
     },
     "lora_prompts": {
         "default": "QA",
-        ".*(alpaca-lora-7b|alpaca-lora-13b|alpaca-lora-30b)": "Alpaca",
+        ".*alpaca": "Alpaca",
         ".*baize": "Baize"
     }
 }
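
Note for reviewers: below is a minimal sketch of how an instruction template like the Metharme.yaml added above can be expanded into a flat prompt string. It only illustrates the placeholder convention (<|user|>/<|bot|> stand for the tags from your_name/name, <|user-message|>/<|bot-message|> for the actual messages); the build_prompt function and its parameters are hypothetical, not the webui's actual prompt-building code.

    # Illustrative sketch only -- not the webui's real implementation.
    def build_prompt(context, user_tag, bot_tag, turn_template, history, user_input):
        # Start from the system context (for Metharme, the literal "<|system|>"
        # tag followed by whatever instructions come after it).
        prompt = context
        # Replay past turns through the turn template.
        for user_msg, bot_msg in history:
            prompt += (turn_template
                       .replace('<|user|>', user_tag)
                       .replace('<|user-message|>', user_msg)
                       .replace('<|bot|>', bot_tag)
                       .replace('<|bot-message|>', bot_msg))
        # Open a new turn and leave the bot tag dangling so the model completes it.
        prompt += user_tag + user_input + bot_tag
        return prompt

    # Reproduces the shape of the first turn in prompts/Metharme.txt:
    print(build_prompt(
        context='<|system|>This is a text adventure game. Describe the scenario '
                'to the user and give him three options to pick from on each turn.',
        user_tag='<|user|>',
        bot_tag='<|model|>',
        turn_template='<|user|><|user-message|><|bot|><|bot-message|>',
        history=[],
        user_input='Start!',
    ))

Because Metharme's your_name and name are themselves the literal strings "<|user|>" and "<|model|>", the tag substitutions are no-ops here; the same mechanism lets other templates (Alpaca, Vicuna, etc.) swap in their own speaker labels.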