llama : fix compile warnings when reading the vocab

Georgi Gerganov 2023-03-29 22:13:12 +03:00
parent cea1c85948
commit 0ba76c1e73


@@ -1444,7 +1444,7 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s
             return false;
         }

-        std::string word;
+        std::vector<char> word(32);
         vocab.id_to_token.resize(n_vocab);
         for (int i = 0; i < n_vocab; i++) {
             uint32_t len;
@@ -1459,10 +1459,10 @@ static bool llama_model_quantize_internal(const std::string & fname_inp, const s
             finp.read ((char *) &score, sizeof(score));
             fout.write((char *) &score, sizeof(score));

-            vocab.token_to_id[word] = i;
+            vocab.token_to_id[word.data()] = i;

             auto &tok_score = vocab.id_to_token[i];
-            tok_score.tok = word;
+            tok_score.tok = word.data();
             tok_score.score = score;
         }
     }
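For illustration, here is a minimal, self-contained sketch (not the upstream llama.cpp code) of the general pattern the change moves toward: reading each length-prefixed vocab token into a resizable std::vector<char> scratch buffer instead of writing through a std::string's internal storage, which is what produced the compile warnings. The toy_vocab struct, read_vocab function, and the in-memory stream used for the round trip are assumptions made for this example only; constructing the std::string with an explicit length is also this sketch's choice, so no null terminator is required.

#include <cstdint>
#include <istream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

// Hypothetical, simplified vocab container for this sketch.
struct toy_vocab {
    std::map<std::string, int> token_to_id;
    std::vector<std::string>   id_to_token;
};

// Read n_vocab length-prefixed tokens: [uint32 len][len bytes], repeated.
static void read_vocab(std::istream & fin, toy_vocab & vocab, int n_vocab) {
    std::vector<char> word(32);            // scratch buffer, grown as needed
    vocab.id_to_token.resize(n_vocab);

    for (int i = 0; i < n_vocab; i++) {
        uint32_t len = 0;
        fin.read((char *) &len, sizeof(len));

        word.resize(len);
        fin.read(word.data(), len);        // vector<char>::data() is non-const: no cast warning

        // construct the key/token with an explicit length
        std::string tok(word.data(), len);
        vocab.token_to_id[tok] = i;
        vocab.id_to_token[i]   = tok;
    }
}

int main() {
    // build a tiny length-prefixed blob with two tokens, then read it back
    std::stringstream ss;
    for (const std::string & t : {std::string("hello"), std::string("world")}) {
        uint32_t len = (uint32_t) t.size();
        ss.write((char *) &len, sizeof(len));
        ss.write(t.data(), len);
    }

    toy_vocab vocab;
    read_vocab(ss, vocab, 2);
    return vocab.id_to_token[1] == "world" ? 0 : 1;
}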