llama : mark LLM_ARCH_STARCODER as full offload supported (#3945)

As done in https://github.com/ggerganov/llama.cpp/pull/3827.
Meng Zhang 2023-11-05 04:40:08 -08:00 committed by GitHub
parent c41ea36eaa
commit 3d48f42efc

llama.cpp

@@ -5164,11 +5164,12 @@ static int llama_decode_internal(
     // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
     const bool full_offload_supported =
         model.arch == LLM_ARCH_LLAMA    ||
         model.arch == LLM_ARCH_BAICHUAN ||
         model.arch == LLM_ARCH_FALCON   ||
         model.arch == LLM_ARCH_REFACT   ||
-        model.arch == LLM_ARCH_MPT;
+        model.arch == LLM_ARCH_MPT      ||
+        model.arch == LLM_ARCH_STARCODER;
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
     if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
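
For context, llama_decode_internal uses these two flags to decide how many CPU threads to run with. Below is a minimal, self-contained sketch of that logic, not the verbatim upstream code: the LLM_ARCH_* names mirror the real identifiers, but model_t, hparams_t, and pick_n_threads are hypothetical stand-ins, and the single-thread fallback is inferred from the comment in the diff ("using more than 1 thread is detrimental").

// Hypothetical sketch; types and helper name are stand-ins, not upstream definitions.
enum llm_arch {
    LLM_ARCH_LLAMA,
    LLM_ARCH_BAICHUAN,
    LLM_ARCH_FALCON,
    LLM_ARCH_REFACT,
    LLM_ARCH_MPT,
    LLM_ARCH_STARCODER,
};

struct model_t   { llm_arch arch; int n_gpu_layers; };
struct hparams_t { int n_layer; };

static int pick_n_threads(const model_t & model, const hparams_t & hparams,
                          bool has_cublas, int n_threads) {
    // Architectures whose whole graph can be offloaded to the GPU;
    // this commit adds LLM_ARCH_STARCODER to the list.
    const bool full_offload_supported =
        model.arch == LLM_ARCH_LLAMA    ||
        model.arch == LLM_ARCH_BAICHUAN ||
        model.arch == LLM_ARCH_FALCON   ||
        model.arch == LLM_ARCH_REFACT   ||
        model.arch == LLM_ARCH_MPT      ||
        model.arch == LLM_ARCH_STARCODER;

    // Mirrors the diff's check: the n_layer repeating blocks plus 3 extra
    // offload slots beyond them count as "fully offloaded".
    const bool fully_offloaded = model.n_gpu_layers >= hparams.n_layer + 3;

    if (has_cublas && full_offload_supported && fully_offloaded) {
        return 1; // everything runs on the GPU; extra CPU threads only add overhead
    }
    return n_threads;
}

The design point of the commit is just the predicate: once an architecture's graph is known to run fully on the GPU, adding it to full_offload_supported lets fully offloaded runs skip CPU-side threading that would otherwise slow decoding down.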