From 15b67a66c2f2d6032415b28a699b5131962318f1 Mon Sep 17 00:00:00 2001
From: slaren
Date: Thu, 7 Sep 2023 15:52:34 +0200
Subject: [PATCH] llama-bench : use two tokens in the warmup run for prompt
 evals (#3059)

---
 examples/llama-bench/llama-bench.cpp | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 72a025077..dedaa34fd 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -986,7 +986,12 @@ int main(int argc, char ** argv) {
         test t(inst, lmodel, ctx);

         // warmup run
-        test_gen(ctx, 1, 0, t.n_threads);
+        if (t.n_prompt > 0) {
+            test_prompt(ctx, std::min(2, t.n_batch), 0, t.n_batch, t.n_threads);
+        }
+        if (t.n_gen > 0) {
+            test_gen(ctx, 1, 0, t.n_threads);
+        }

         for (int i = 0; i < params.reps; i++) {
             uint64_t t_start = get_time_ns();
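
For context, a minimal standalone sketch of the control flow this patch introduces: warm up the prompt-eval path only when the test measures prompt processing (n_prompt > 0), and the generation path only when it measures generation (n_gen > 0). Only the call shapes and the std::min(2, t.n_batch) clamp come from the diff above; the stub bodies and the parameter values below are hypothetical stand-ins for the real llama-bench helpers.

// Hedged sketch of the warmup dispatch, assuming the test_prompt/test_gen
// signatures visible in the diff. The stubs are hypothetical; in llama-bench
// they decode tokens through the llama.cpp API.
#include <algorithm>
#include <cstdio>

struct ctx_t {};  // hypothetical stand-in for llama_context

// Stub: evaluates n_prompt tokens in batches of n_batch (prompt-eval path).
static void test_prompt(ctx_t * ctx, int n_prompt, int n_past, int n_batch, int n_threads) {
    (void) ctx; (void) n_past;
    printf("warmup prompt eval: %d token(s), batch %d, %d thread(s)\n", n_prompt, n_batch, n_threads);
}

// Stub: generates n_gen tokens one at a time (text-generation path).
static void test_gen(ctx_t * ctx, int n_gen, int n_past, int n_threads) {
    (void) ctx; (void) n_past;
    printf("warmup gen: %d token(s), %d thread(s)\n", n_gen, n_threads);
}

int main() {
    ctx_t ctx;
    // Hypothetical test parameters, mirroring the fields of `test t` in the diff.
    int n_prompt = 512, n_gen = 128, n_batch = 512, n_threads = 4;

    // Warm up each code path the benchmark will actually measure: a >1-token
    // prompt eval goes through the batched decode path, while a 1-token gen
    // goes through the single-token decode path.
    if (n_prompt > 0) {
        test_prompt(&ctx, std::min(2, n_batch), 0, n_batch, n_threads);
    }
    if (n_gen > 0) {
        test_gen(&ctx, 1, 0, n_threads);
    }
    return 0;
}

Note the std::min(2, n_batch): it keeps the two-token warmup from exceeding the configured batch size when benchmarking with n_batch == 1.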