From bbf3e55e352d309573bdafee01a014b0a2492155 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Tue, 14 Jan 2025 12:54:58 +0200
Subject: [PATCH] vocab : add dummy tokens for "no_vocab" type (#11231)

* vocab : add dummy tokens for "no_vocab" type

ggml-ci

* vocab : minor

[no ci]
---
 src/llama-vocab.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 96b74e93a..4969d2628 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -1356,8 +1356,9 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
 
             // read vocab size from metadata
             uint32_t n_tokens = 0;
-            if (!ml.get_key(LLM_KV_VOCAB_SIZE, n_tokens, false)) {
-                LLAMA_LOG_WARN("%s: there is no vocab_size in metadata\n", __func__);
+            if (ml.get_key(LLM_KV_VOCAB_SIZE, n_tokens, false)) {
+                LLAMA_LOG_WARN("%s: adding %u dummy tokens\n", __func__, n_tokens);
+                id_to_token.resize(n_tokens);
             }
 
             return;