llama : use n_threads_batch only when n_tokens >= 32

ggml-ci
Georgi Gerganov 2023-11-27 20:48:44 +02:00
parent f815fe43d3
commit e9b7a5cbd0

@@ -5433,7 +5433,7 @@ static int llama_decode_internal(
     GGML_ASSERT(n_tokens <= n_batch);
 
-    int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
+    int n_threads = n_tokens < 32 ? cparams.n_threads : cparams.n_threads_batch;
 
     GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
 
     const int64_t t_start_us = ggml_time_us();
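
In effect, batches shorter than 32 tokens now run with the generation thread count rather than the batch thread count, instead of reserving the batch pool for everything beyond single-token decoding. A minimal standalone sketch of the heuristic (the helper name select_n_threads is hypothetical; the cparams fields are the ones used in the diff above):

    // Sketch: pick the thread count based on batch size. Decoding only a
    // handful of tokens tends to be memory-bound, so the generation thread
    // count applies; from 32 tokens up, the compute-bound path gets the
    // dedicated batch thread count.
    static int select_n_threads(uint32_t n_tokens, const llama_cparams & cparams) {
        return n_tokens < 32 ? cparams.n_threads : cparams.n_threads_batch;
    }
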
@@ -5550,18 +5550,8 @@ static int llama_decode_internal(
         n_threads = std::min(4, n_threads);
     }
 
     // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
-    const bool full_offload_supported =
-        model.arch == LLM_ARCH_LLAMA     ||
-        model.arch == LLM_ARCH_BAICHUAN  ||
-        model.arch == LLM_ARCH_FALCON    ||
-        model.arch == LLM_ARCH_REFACT    ||
-        model.arch == LLM_ARCH_MPT       ||
-        model.arch == LLM_ARCH_STARCODER ||
-        model.arch == LLM_ARCH_STABLELM;
 
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
-    if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
+    if (ggml_cpu_has_cublas() && fully_offloaded) {
        n_threads = 1;
    }
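
With the per-architecture whitelist gone, the single-thread fast path now applies to any model that is fully offloaded, not just the seven architectures previously listed. A condensed sketch of the remaining condition (the helper name is hypothetical; ggml_cpu_has_cublas() and the "+ 3" headroom, which presumably covers tensors offloaded in addition to the n_layer repeating blocks, are taken from the code above):

    // Sketch: when cuBLAS runs the whole graph on the GPU, extra CPU threads
    // only add synchronization overhead, so drop to a single thread.
    static bool decode_on_single_thread(const llama_model & model, const llama_hparams & hparams) {
        const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
        return ggml_cpu_has_cublas() && fully_offloaded;
    }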