Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-28 15:18:26 +01:00)
llama : revert n_threads_batch logic
ggml-ci
commit 87f4102a70
parent e9b7a5cbd0
@@ -5433,7 +5433,7 @@ static int llama_decode_internal(
     GGML_ASSERT(n_tokens <= n_batch);

-    int n_threads = n_tokens < 32 ? cparams.n_threads : cparams.n_threads_batch;
+    int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;

     GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT

     const int64_t t_start_us = ggml_time_us();
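For context, a minimal self-contained sketch of what the reverted selection does, assuming only the n_threads / n_threads_batch split visible in the hunk; cparams_sketch and select_n_threads are illustrative names for this sketch, not llama.cpp API.

#include <stdio.h>

/* Illustrative stand-in for the relevant cparams fields; the real struct in
   llama.cpp carries many more members. */
struct cparams_sketch {
    int n_threads;       /* threads used for single-token decode */
    int n_threads_batch; /* threads used for batched (prompt) processing */
};

/* Reverted logic from the hunk: only a single-token batch uses n_threads;
   any larger batch uses n_threads_batch. Before the revert, the cutoff
   was n_tokens < 32, so batches of 2..31 tokens also ran on n_threads. */
static int select_n_threads(struct cparams_sketch cp, int n_tokens) {
    return n_tokens == 1 ? cp.n_threads : cp.n_threads_batch;
}

int main(void) {
    struct cparams_sketch cp = { 4, 8 }; /* n_threads = 4, n_threads_batch = 8 */
    printf("n_tokens = 1  -> %d threads\n", select_n_threads(cp, 1));  /* 4 */
    printf("n_tokens = 16 -> %d threads\n", select_n_threads(cp, 16)); /* 8; was 4 before this revert */
    return 0;
}

In effect, the revert restores the original behavior where every multi-token batch, however small, runs with the batch thread count.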