Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-01-01 00:39:00 +01:00)
common : warm-up with 2 tokens - seems to work better
commit ebe41d49a6
parent 013457885a
@@ -770,7 +770,7 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
     {
        LOG("warming up the model with an empty run\n");

-        const std::vector<llama_token> tmp = { llama_token_bos(lctx), };
+        const std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };
        llama_eval(lctx, tmp.data(), tmp.size(), 0, params.n_threads);
        llama_reset_timings(lctx);
    }
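The change makes the warm-up evaluate two tokens (BOS followed by EOS) instead of BOS alone, then calls llama_reset_timings so the warm-up pass does not show up in the reported timings. Below is a minimal sketch of the same idea as a standalone helper, assuming the llama.cpp C API of this commit's era (llama_token_bos, llama_token_eos, llama_eval, llama_reset_timings); the helper name warm_up_model and the standalone framing are illustrative, not part of the patch.

#include <vector>
#include "llama.h"

// Hypothetical helper mirroring the patched warm-up: run a single eval over
// { BOS, EOS } so lazily-allocated buffers and caches are touched once, then
// reset the timings so the warm-up is not counted in performance reports.
static void warm_up_model(struct llama_context * lctx, int n_threads) {
    const std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx) };
    llama_eval(lctx, tmp.data(), tmp.size(), /*n_past=*/0, n_threads);
    llama_reset_timings(lctx);
}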