Read more metadata (config.json & quantize_config.json)

oobabooga 2023-09-29 06:14:16 -07:00
parent 56b5a4af74
commit 96da2e1c0d
3 changed files with 59 additions and 67 deletions

models/config.yaml

@@ -6,8 +6,6 @@
   model_type: 'gptj'
 .*(gpt-neox|koalpaca-polyglot|polyglot.*koalpaca|polyglot-ko|polyglot_ko|pythia|stablelm|incite|dolly-v2|polycoder|h2ogpt-oig|h2ogpt-oasst1|h2ogpt-gm):
   model_type: 'gptneox'
-.*llama:
-  model_type: 'llama'
 .*bloom:
   model_type: 'bloom'
 .*gpt2:
@@ -22,38 +20,32 @@
   model_type: 'dollyv2'
 .*replit:
   model_type: 'replit'
-llama-65b-gptq-3bit:
-  groupsize: 'None'
-.*(4bit|int4):
-  wbits: 4
-.*(3bit|int3):
-  wbits: 3
-.*(-2bit|_2bit|int2-):
-  wbits: 2
-.*(-1bit|_1bit|int1-):
-  wbits: 1
-.*(8bit|int8):
-  wbits: 8
-.*(-7bit|_7bit|int7-):
-  wbits: 7
-.*(-6bit|_6bit|int6-):
-  wbits: 6
-.*(-5bit|_5bit|int5-):
-  wbits: 5
-.*(-gr32-|-32g-|groupsize32|-32g$):
-  groupsize: 32
-.*(-gr64-|-64g-|groupsize64|-64g$):
-  groupsize: 64
-.*(gr128|128g|groupsize128):
-  groupsize: 128
-.*(gr1024|1024g|groupsize1024):
-  groupsize: 1024
 .*(oasst|openassistant-|stablelm-7b-sft-v7-epoch-3):
   instruction_template: 'Open Assistant'
   skip_special_tokens: false
 (?!.*galactica)(?!.*reward).*openassistant:
   instruction_template: 'Open Assistant'
   skip_special_tokens: false
+.*galactica:
+  skip_special_tokens: false
+.*dolly-v[0-9]-[0-9]*b:
+  instruction_template: 'Alpaca'
+  skip_special_tokens: false
+.*alpaca-native-4bit:
+  instruction_template: 'Alpaca'
+  custom_stopping_strings: '"### End"'
+.*llava:
+  instruction_template: 'LLaVA'
+  custom_stopping_strings: '"\n###"'
+.*wizard.*mega:
+  instruction_template: 'Wizard-Mega'
+  custom_stopping_strings: '"</s>"'
+.*starchat-beta:
+  instruction_template: 'Starchat-Beta'
+  custom_stopping_strings: '"<|end|>"'
+.*(openorca-platypus2):
+  instruction_template: 'OpenOrca-Platypus2'
+  custom_stopping_strings: '"### Instruction:", "### Response:"'
 (?!.*v0)(?!.*1.1)(?!.*1_1)(?!.*stable)(?!.*chinese).*vicuna:
   instruction_template: 'Vicuna-v0'
 .*vicuna.*v0:
@@ -70,26 +62,12 @@ llama-65b-gptq-3bit:
   instruction_template: 'Chinese-Vicuna-Chat'
 .*alpaca:
   instruction_template: 'Alpaca'
-.*alpaca-native-4bit:
-  instruction_template: 'Alpaca'
-  wbits: 4
-  groupsize: 128
-.*galactica:
-  skip_special_tokens: false
-.*dolly-v[0-9]-[0-9]*b:
-  instruction_template: 'Alpaca'
-  skip_special_tokens: false
-  custom_stopping_strings: '"### End"'
 .*koala:
   instruction_template: 'Koala'
 .*chatglm:
   instruction_template: 'ChatGLM'
 .*(metharme|pygmalion|mythalion):
   instruction_template: 'Metharme'
-.*llava:
-  model_type: 'llama'
-  instruction_template: 'LLaVA'
-  custom_stopping_strings: '"\n###"'
 .*raven:
   instruction_template: 'RWKV-Raven'
 .*moss-moon.*sft:
@@ -116,9 +94,6 @@ llama-65b-gptq-3bit:
   instruction_template: 'INCITE-Chat'
 .*incite.*instruct:
   instruction_template: 'INCITE-Instruct'
-.*wizard.*mega:
-  instruction_template: 'Wizard-Mega'
-  custom_stopping_strings: '"</s>"'
 .*ziya-:
   instruction_template: 'Ziya'
 .*koalpaca:
@@ -169,14 +144,10 @@ llama-65b-gptq-3bit:
   instruction_template: 'Samantha'
 .*wizardcoder:
   instruction_template: 'Alpaca'
-.*starchat-beta:
-  instruction_template: 'Starchat-Beta'
-  custom_stopping_strings: '"<|end|>"'
 .*minotaur:
   instruction_template: 'Minotaur'
 .*orca_mini:
   instruction_template: 'Orca Mini'
-  instruction_template: 'Vicuna-v0'
 .*(platypus|gplatty|superplatty):
   instruction_template: 'Alpaca'
 .*longchat:
@@ -199,12 +170,6 @@ llama-65b-gptq-3bit:
   instruction_template: 'StableBeluga2'
 .*openchat:
   instruction_template: 'OpenChat'
-.*falcon.*-instruct:
-.*(openorca-platypus2):
-  instruction_template: 'OpenOrca-Platypus2'
-  custom_stopping_strings: '"### Instruction:", "### Response:"'
-.*codellama:
-  rope_freq_base: 1000000
 .*codellama.*instruct:
   instruction_template: 'Llama-v2'
 .*mistral.*instruct:
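
With the wbits/groupsize name heuristics removed, every remaining entry in this file carries behavioral settings only; quantization parameters now come from the model's own metadata files (see the next file). As a minimal sketch of how these entries are applied, mirroring the matching loop visible at the end of the models_settings.py hunk below (the model name and pattern subset here are illustrative, not from the commit):

import re

# Illustrative subset of the YAML above, as a Python dict
patterns = {
    '.*llava': {'instruction_template': 'LLaVA', 'custom_stopping_strings': '"\\n###"'},
    '.*codellama.*instruct': {'instruction_template': 'Llama-v2'},
}

def apply_model_config(model_name):
    # Each top-level key is treated as a case-insensitive regex against the
    # model folder name; later matches override earlier ones.
    settings = {}
    for pat, values in patterns.items():
        if re.match(pat.lower(), model_name.lower()):
            settings.update(values)

    return settings

print(apply_model_config('llava-v1.5-13B-GPTQ'))
# {'instruction_template': 'LLaVA', 'custom_stopping_strings': '"\\n###"'}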

modules/models_settings.py

@@ -10,16 +10,16 @@ from modules import loaders, metadata_gguf, shared, ui
 def get_fallback_settings():
     return {
         'wbits': 'None',
-        'model_type': 'None',
         'groupsize': 'None',
-        'pre_layer': 0,
-        'skip_special_tokens': shared.settings['skip_special_tokens'],
-        'custom_stopping_strings': shared.settings['custom_stopping_strings'],
-        'truncation_length': shared.settings['truncation_length'],
+        'desc_act': False,
+        'model_type': 'None',
         'max_seq_len': 2048,
         'n_ctx': 2048,
         'rope_freq_base': 0,
         'compress_pos_emb': 1,
+        'truncation_length': shared.settings['truncation_length'],
+        'skip_special_tokens': shared.settings['skip_special_tokens'],
+        'custom_stopping_strings': shared.settings['custom_stopping_strings'],
     }
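
For orientation, these fallbacks (note the new 'desc_act': False default) are the values that the metadata reads below overwrite; a compressed sketch of that flow, with the call site simplified and assumed rather than shown in this diff:

settings = get_fallback_settings()           # starts at 'wbits': 'None', 'desc_act': False, ...
settings.update(get_model_metadata(model))   # config.json / quantize_config.json values take precedence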
@@ -56,8 +56,8 @@ def get_model_metadata(model):
         if 'llama.rope.freq_base' in metadata:
             model_settings['rope_freq_base'] = metadata['llama.rope.freq_base']
-    # Read transformers metadata. In particular, the sequence length for the model.
     else:
+        # Read transformers metadata
         path = Path(f'{shared.args.model_dir}/{model}/config.json')
         if path.exists():
             metadata = json.loads(open(path, 'r').read())
@@ -65,6 +65,32 @@ def get_model_metadata(model):
                 model_settings['truncation_length'] = metadata['max_position_embeddings']
                 model_settings['max_seq_len'] = metadata['max_position_embeddings']
+
+            if 'rope_theta' in metadata:
+                model_settings['rope_freq_base'] = metadata['rope_theta']
+
+            if 'rope_scaling' in metadata and type(metadata['rope_scaling']) is dict and all(key in metadata['rope_scaling'] for key in ('type', 'factor')):
+                if metadata['rope_scaling']['type'] == 'linear':
+                    model_settings['compress_pos_emb'] = metadata['rope_scaling']['factor']
+
+            if 'quantization_config' in metadata:
+                if 'bits' in metadata['quantization_config']:
+                    model_settings['wbits'] = metadata['quantization_config']['bits']
+                if 'group_size' in metadata['quantization_config']:
+                    model_settings['groupsize'] = metadata['quantization_config']['group_size']
+                if 'desc_act' in metadata['quantization_config']:
+                    model_settings['desc_act'] = metadata['quantization_config']['desc_act']
+
+        # Read AutoGPTQ metadata
+        path = Path(f'{shared.args.model_dir}/{model}/quantize_config.json')
+        if path.exists():
+            metadata = json.loads(open(path, 'r').read())
+            if 'bits' in metadata:
+                model_settings['wbits'] = metadata['bits']
+            if 'group_size' in metadata:
+                model_settings['groupsize'] = metadata['group_size']
+            if 'desc_act' in metadata:
+                model_settings['desc_act'] = metadata['desc_act']
+
     # Apply user settings from models/config-user.yaml
     settings = shared.user_config
     for pat in settings:
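
To make the new reads concrete, here is a self-contained sketch of the same parsing run against illustrative file contents; the values are typical of a 4-bit GPTQ export with linear RoPE scaling, not taken from any specific model, and the rope_scaling check is condensed relative to the diff:

import json

# Illustrative config.json fragment (values assumed for the example)
config_json = json.loads('''{
    "max_position_embeddings": 4096,
    "rope_theta": 1000000.0,
    "rope_scaling": {"type": "linear", "factor": 2.0},
    "quantization_config": {"bits": 4, "group_size": 128, "desc_act": false}
}''')

model_settings = {}
if 'max_position_embeddings' in config_json:
    model_settings['truncation_length'] = config_json['max_position_embeddings']
    model_settings['max_seq_len'] = config_json['max_position_embeddings']

if 'rope_theta' in config_json:
    model_settings['rope_freq_base'] = config_json['rope_theta']

rs = config_json.get('rope_scaling')
if isinstance(rs, dict) and rs.get('type') == 'linear' and 'factor' in rs:
    model_settings['compress_pos_emb'] = rs['factor']

qc = config_json.get('quantization_config', {})
for src, dst in (('bits', 'wbits'), ('group_size', 'groupsize'), ('desc_act', 'desc_act')):
    if src in qc:
        model_settings[dst] = qc[src]

print(model_settings)
# {'truncation_length': 4096, 'max_seq_len': 4096, 'rope_freq_base': 1000000.0,
#  'compress_pos_emb': 2.0, 'wbits': 4, 'groupsize': 128, 'desc_act': False}

A standalone quantize_config.json carrying the same bits/group_size/desc_act keys is read afterwards, so its values overwrite anything taken from quantization_config.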

modules/ui_model_menu.py

@@ -258,9 +258,10 @@ def download_model_wrapper(repo_id, specific_file, progress=gr.Progress(), retur
 def update_truncation_length(current_length, state):
-    if state['loader'].lower().startswith('exllama'):
-        return state['max_seq_len']
-    elif state['loader'] in ['llama.cpp', 'llamacpp_HF', 'ctransformers']:
-        return state['n_ctx']
-    else:
-        return current_length
+    if 'loader' in state:
+        if state['loader'].lower().startswith('exllama'):
+            return state['max_seq_len']
+        elif state['loader'] in ['llama.cpp', 'llamacpp_HF', 'ctransformers']:
+            return state['n_ctx']
+
+    return current_length
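
The new 'loader' in state guard matters when the function runs before any loader has been selected: the old code raised KeyError on a state without that key, while the new code falls back to the current length. A quick illustration of both paths (the state dicts here are made up for the example):

def update_truncation_length(current_length, state):
    if 'loader' in state:
        if state['loader'].lower().startswith('exllama'):
            return state['max_seq_len']
        elif state['loader'] in ['llama.cpp', 'llamacpp_HF', 'ctransformers']:
            return state['n_ctx']

    return current_length

print(update_truncation_length(2048, {'loader': 'llama.cpp', 'n_ctx': 8192}))  # 8192
print(update_truncation_length(2048, {}))  # 2048 -- previously a KeyError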