Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-01-27 20:43:07 +01:00)
Commit 6171c9d258:

* Copy minja from 58f0ca6dd7
* Add --jinja and --chat-template-file flags
* Add missing <optional> include
* Avoid print in get_hf_chat_template.py
* No designated initializers yet
* Try and work around msvc++ non-macro max resolution quirk
* Update test_chat_completion.py
* Wire LLM_KV_TOKENIZER_CHAT_TEMPLATE_N in llama_model_chat_template
* Refactor test-chat-template
* Test templates w/ minja
* Fix deprecation
* Add --jinja to llama-run
* Update common_chat_format_example to use minja template wrapper
* Test chat_template in e2e test
* Update utils.py
* Update test_chat_completion.py
* Update run.cpp
* Update arg.cpp
* Refactor common_chat_* functions to accept minja template + use_jinja option
* Attempt to fix linkage of LLAMA_CHATML_TEMPLATE
* Revert LLAMA_CHATML_TEMPLATE refactor
* Normalize newlines in test-chat-templates for windows tests
* Forward decl minja::chat_template to avoid eager json dep
* Flush stdout in chat template before potential crash
* Fix copy elision warning
* Rm unused optional include
* Add missing optional include to server.cpp
* Disable jinja test that has a cryptic windows failure
* minja: fix vigogne (https://github.com/google/minja/pull/22)
* Apply suggestions from code review

  Co-authored-by: Xuan Son Nguyen <thichthat@gmail.com>
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>

* Finish suggested renamings
* Move chat_templates inside server_context + remove mutex
* Update --chat-template-file w/ recent change to --chat-template
* Refactor chat template validation
* Guard against missing eos/bos tokens (null token otherwise throws in llama_vocab::impl::token_get_attr)
* Warn against missing eos / bos tokens when jinja template references them
* rename: common_chat_template[s]
* reinstate assert on chat_templates.template_default
* Update minja to b8437df626
* Update minja to https://github.com/google/minja/pull/25
* Update minja from https://github.com/google/minja/pull/27
* rm unused optional header

---------

Co-authored-by: Xuan Son Nguyen <thichthat@gmail.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
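The commit above wires Jinja chat template support into llama.cpp via minja and adds the --jinja and --chat-template-file flags. As a rough sketch of the workflow this enables (the binary, model, and output file names below are illustrative, not taken from the commit):

    ./scripts/get_hf_chat_template.py meta-llama/Llama-3.2-3B-Instruct > template.jinja
    ./llama-server -m model.gguf --jinja --chat-template-file template.jinja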
78 lines
2.9 KiB
Python
Executable File
#!/usr/bin/env python
'''
    Fetches the Jinja chat template of a HuggingFace model.
    If a model has multiple chat templates, you can specify the variant name.

    Syntax:
        ./scripts/get_hf_chat_template.py model_id [variant]

    Examples:
        ./scripts/get_hf_chat_template.py NousResearch/Meta-Llama-3-8B-Instruct
        ./scripts/get_hf_chat_template.py NousResearch/Hermes-3-Llama-3.1-8B tool_use
        ./scripts/get_hf_chat_template.py meta-llama/Llama-3.2-3B-Instruct
'''

import json
import re
import sys

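# Note: `huggingface_hub` and `requests` are imported lazily inside
# get_hf_chat_template() below, so neither is a hard dependency of this script.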
def get_hf_chat_template(model_id, variant=None):
    try:
        # Use huggingface_hub library if available.
        # Allows access to gated models if the user has access and ran `huggingface-cli login`.
        from huggingface_hub import hf_hub_download
        with open(hf_hub_download(repo_id=model_id, filename="tokenizer_config.json")) as f:
            config_str = f.read()
    except ImportError:
        import requests
        assert re.match(r"^[\w.-]+/[\w.-]+$", model_id), f"Invalid model ID: {model_id}"
        response = requests.get(f"https://huggingface.co/{model_id}/resolve/main/tokenizer_config.json")
        if response.status_code == 401:
            raise Exception('Access to this model is gated, please request access, authenticate with `huggingface-cli login` and make sure to run `pip install huggingface_hub`')
        response.raise_for_status()
        config_str = response.text
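
    # Either way, config_str now holds the raw text of tokenizer_config.json.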
    try:
        config = json.loads(config_str)
    except json.JSONDecodeError:
        # Fix https://huggingface.co/NousResearch/Meta-Llama-3-8B-Instruct/blob/main/tokenizer_config.json
        # (Remove extra '}' near the end of the file)
        config = json.loads(re.sub(r'\}([\n\s]*\}[\n\s]*\],[\n\s]*"clean_up_tokenization_spaces")', r'\1', config_str))

    chat_template = config['chat_template']
    if isinstance(chat_template, str):
        return chat_template
    else:
        variants = {
            ct['name']: ct['template']
            for ct in chat_template
        }

        def format_variants():
            return ', '.join(f'"{v}"' for v in variants.keys())

        if variant is None:
            if 'default' not in variants:
                raise Exception(f'Please specify a chat template variant (one of {format_variants()})')
            variant = 'default'
            sys.stderr.write(f'Note: picked "default" chat template variant (out of {format_variants()})\n')
        elif variant not in variants:
            raise Exception(f"Variant {variant} not found in chat template (found {format_variants()})")

        return variants[variant]

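# For reference, the `chat_template` field handled above is either a single
# template string or a list of named variants, e.g. (illustrative shape only,
# not copied from any specific model):
#
#   "chat_template": [
#       {"name": "default",  "template": "{% for message in messages %}..."},
#       {"name": "tool_use", "template": "{% for message in messages %}..."}
#   ]
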

def main(args):
    if len(args) < 1:
        raise ValueError("Please provide a model ID and an optional variant name")
    model_id = args[0]
    variant = None if len(args) < 2 else args[1]

    template = get_hf_chat_template(model_id, variant)
    sys.stdout.write(template)


if __name__ == '__main__':
    main(sys.argv[1:])
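
As a quick illustration of what such a template produces, here is a minimal rendering sketch using the `jinja2` Python package (llama.cpp itself renders with minja, a C++ Jinja subset, so results may differ on templates that use features minja does not support; the special-token values below are placeholders, and get_hf_chat_template() from the script above is assumed to be in scope):

    from jinja2 import Environment

    # Fetch the template, then render a tiny one-message conversation with it.
    template_str = get_hf_chat_template('NousResearch/Meta-Llama-3-8B-Instruct')
    prompt = Environment().from_string(template_str).render(
        messages=[{'role': 'user', 'content': 'Hello!'}],
        bos_token='<|begin_of_text|>',  # placeholder special tokens; real ones
        eos_token='<|eot_id|>',         # come from the model's tokenizer config
        add_generation_prompt=True,
    )
    print(prompt)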