llama : revert n_threads_batch logic

ggml-ci
This commit is contained in:
Georgi Gerganov 2023-11-27 21:21:23 +02:00
parent e9b7a5cbd0
commit 87f4102a70
No known key found for this signature in database
GPG Key ID: 449E073F9DC10735

View File

@ -5433,7 +5433,7 @@ static int llama_decode_internal(
     GGML_ASSERT(n_tokens <= n_batch);
-    int n_threads = n_tokens < 32 ? cparams.n_threads : cparams.n_threads_batch;
+    int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
     GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
     const int64_t t_start_us = ggml_time_us();