text-generation-webui/models/config.yaml
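# Default per-model settings for text-generation-webui. Each top-level key is a
# regular expression matched against the model's folder name; every matching
# entry contributes its values, and later (more specific) entries override
# earlier ones. This description of the matching behaviour reflects how the
# webui loads this file at the time of writing; confirm against your version.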

.*:
  wbits: 'None'
  model_type: 'None'
  groupsize: 'None'
  pre_layer: 0
  mode: 'chat'
  skip_special_tokens: true
  custom_stopping_strings: ''
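# GPTQ quantization parameters inferred from common folder-name suffixes
# (4bit/int4, 3bit, gr128/128g).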
llama-[0-9]*b-4bit$:
  wbits: 4
  model_type: 'llama'
.*-(4bit|int4)-(gr128|128g):
  wbits: 4
  groupsize: 128
.*-(gr128|128g)-(4bit|int4):
  wbits: 4
  groupsize: 128
.*-3bit-(gr128|128g):
  wbits: 3
  groupsize: 128
.*-(gr128|128g)-3bit:
  wbits: 3
  groupsize: 128
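# Per-model overrides for specific model families: instruct mode, instruction
# templates, and tokenizer/stopping-string tweaks.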
.*(oasst-sft-1-pythia-12b|oasst-sft-6-llama-30b):
  mode: 'instruct'
  instruction_template: 'Open Assistant'
.*vicuna:
  mode: 'instruct'
  instruction_template: 'Vicuna-v0'
.*alpaca:
  mode: 'instruct'
  instruction_template: 'Alpaca'
.*alpaca-native-4bit:
  mode: 'instruct'
  instruction_template: 'Alpaca'
  wbits: 4
  groupsize: 128
.*(galactica|oasst):
  skip_special_tokens: false
.*dolly-v[0-9]-[0-9]*b:
  mode: 'instruct'
  instruction_template: 'Alpaca'
  skip_special_tokens: false
  custom_stopping_strings: '"### End"'
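# The custom_stopping_strings value above is a comma-separated list of
# double-quoted strings (a single entry here), matching the format of the
# webui's stopping-strings field; treat the exact parsing as an assumption
# and check your version if it misbehaves.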
.*koala:
  mode: 'instruct'
  instruction_template: 'Koala'
.*chatglm:
  mode: 'instruct'
  instruction_template: 'ChatGLM'
.*llava:
  mode: 'instruct'
  model_type: 'llama'
  instruction_template: 'LLaVA'
  custom_stopping_strings: '"\n###"'
.*raven:
  mode: 'instruct'
  instruction_template: 'RWKV-Raven'
.*moss-moon.*sft:
  mode: 'instruct'
  instruction_template: 'MOSS'
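# A hypothetical example of adding an entry for your own model folder; the
# pattern and values below are illustrative only and not part of the upstream
# file. User-specific overrides are usually better placed in
# models/config-user.yaml (if your webui version supports it) than edited here.
# .*my-llama-13b-4bit-128g:
#   wbits: 4
#   groupsize: 128
#   mode: 'instruct'
#   instruction_template: 'Alpaca'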