vocab : add dummy tokens for "no_vocab" type (#11231)
* vocab : add dummy tokens for "no_vocab" type

ggml-ci

* vocab : minor [no ci]
This commit is contained in:
parent c5bf0d1bd7
commit bbf3e55e35
@@ -1356,8 +1356,9 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
 
         // read vocab size from metadata
         uint32_t n_tokens = 0;
-        if (!ml.get_key(LLM_KV_VOCAB_SIZE, n_tokens, false)) {
-            LLAMA_LOG_WARN("%s: there is no vocab_size in metadata\n", __func__);
+        if (ml.get_key(LLM_KV_VOCAB_SIZE, n_tokens, false)) {
+            LLAMA_LOG_WARN("%s: adding %u dummy tokens\n", __func__, n_tokens);
+            id_to_token.resize(n_tokens);
         }
 
         return;
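To make the effect of the change concrete, here is a small, self-contained sketch (not part of the commit) of why resizing id_to_token matters for a "no_vocab" model. It assumes the reported vocab size is derived from id_to_token.size(), as llama_vocab::n_tokens() does in llama.cpp; the dummy_vocab type, token_data fields, and the 32000 value are illustrative only.

#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// simplified stand-in for a vocab token entry
struct token_data {
    std::string text;
    float       score = 0.0f;
    int         attr  = 0;
};

// simplified stand-in for llama_vocab
struct dummy_vocab {
    std::vector<token_data> id_to_token;

    // mirrors the idea that the reported vocab size is the
    // number of entries in id_to_token
    uint32_t n_tokens() const { return (uint32_t) id_to_token.size(); }
};

int main() {
    dummy_vocab vocab;

    // before the change: a "no_vocab" model left id_to_token empty,
    // so the reported vocab size was 0
    std::printf("without dummy tokens: %u\n", vocab.n_tokens());

    // after the change: if the GGUF metadata carries a vocab_size, the vocab is
    // resized with default-constructed (dummy) entries so the reported size
    // matches the metadata
    uint32_t n_tokens_from_metadata = 32000; // hypothetical value read from LLM_KV_VOCAB_SIZE
    vocab.id_to_token.resize(n_tokens_from_metadata);
    std::printf("with dummy tokens:    %u\n", vocab.n_tokens());

    return 0;
}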