From f675b20a3b7f878bf3be766b9a737e2c8321ff0d Mon Sep 17 00:00:00 2001
From: kustaaya <58045274+kustaaya@users.noreply.github.com>
Date: Thu, 27 Jun 2024 11:58:54 +0300
Subject: [PATCH] Added support for Viking pre-tokenizer (#8135)

Co-authored-by: kustaaya
---
 convert-hf-to-gguf-update.py | 1 +
 convert-hf-to-gguf.py        | 3 +++
 include/llama.h              | 1 +
 src/llama.cpp                | 9 +++++++++
 4 files changed, 14 insertions(+)

diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py
index 67598b561..2758214fa 100755
--- a/convert-hf-to-gguf-update.py
+++ b/convert-hf-to-gguf-update.py
@@ -85,6 +85,7 @@ models = [
     {"name": "smaug-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", },
     {"name": "poro-chat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Poro-34B-chat", },
     {"name": "jina-v2-code", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-code", },
+    {"name": "viking", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Viking-7B", }, # Also used for Viking 13B and 33B
 ]

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index c26fad930..5bf69ef9f 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -487,6 +487,9 @@ class Model:
         if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
             # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
             res = "jina-v2-code"
+        if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
+            # ref: https://huggingface.co/LumiOpen/Viking-7B
+            res = "viking"

         if res is None:
             logger.warning("\n")

diff --git a/include/llama.h b/include/llama.h
index 88eecb0ed..cafeafb85 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -88,6 +88,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_DBRX = 13,
         LLAMA_VOCAB_PRE_TYPE_SMAUG = 14,
         LLAMA_VOCAB_PRE_TYPE_PORO = 15,
+        LLAMA_VOCAB_PRE_TYPE_VIKING = 16,
     };

     // note: these values should be synchronized with ggml_rope

diff --git a/src/llama.cpp b/src/llama.cpp
index 080057332..b97b5e279 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -5067,6 +5067,9 @@ static void llm_load_vocab(
             } else if (
                 tokenizer_pre == "poro-chat") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO;
+            } else if (
+                tokenizer_pre == "viking") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_VIKING;
             } else {
                 throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
             }
@@ -13703,6 +13706,12 @@ struct llm_tokenizer_bpe {
                     " ?[^(\\s|.,!?…。,、।۔،)]+",
                 };
                 break;
+            case LLAMA_VOCAB_PRE_TYPE_VIKING:
+                regex_exprs = {
+                    "\\p{N}",
+                    " ?[^(\\s|.,!?…。,、।۔،)]+",
+                };
+                break;
             default:
                 // default regex for BPE tokenization pre-processing
                 regex_exprs = {