llama : mark LLM_ARCH_STARCODER as full offload supported (#3945)
as done in https://github.com/ggerganov/llama.cpp/pull/3827
commit 3d48f42efc
parent c41ea36eaa
llama.cpp
@@ -5164,11 +5164,12 @@ static int llama_decode_internal(
 
     // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
     const bool full_offload_supported =
         model.arch == LLM_ARCH_LLAMA    ||
         model.arch == LLM_ARCH_BAICHUAN ||
         model.arch == LLM_ARCH_FALCON   ||
         model.arch == LLM_ARCH_REFACT   ||
-        model.arch == LLM_ARCH_MPT;
+        model.arch == LLM_ARCH_MPT      ||
+        model.arch == LLM_ARCH_STARCODER;
 
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
     if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
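For context, the full_offload_supported flag gates a single-thread fast path during decoding: once every layer of a supported architecture is resident on the GPU, extra CPU threads mostly add synchronization overhead. Below is a minimal standalone sketch of that idea, not the surrounding llama.cpp code itself; the helper pick_n_threads, the model_info struct, and the explicit return value are illustrative stand-ins introduced only for this example.

// Standalone sketch (assumed names, not llama.cpp internals): pick the CPU
// thread count given the architecture whitelist and the offload state.
#include <cstdio>

enum llm_arch { LLM_ARCH_LLAMA, LLM_ARCH_STARCODER, LLM_ARCH_OTHER };

struct model_info {
    llm_arch arch;
    int      n_gpu_layers;  // layers requested to run on the GPU
    int      n_layer;       // repeating transformer layers (hparams.n_layer)
};

static int pick_n_threads(const model_info & model, int requested_threads, bool has_cublas) {
    // Architectures known to run fully on the GPU; StarCoder is the one this commit adds.
    const bool full_offload_supported =
        model.arch == LLM_ARCH_LLAMA ||
        model.arch == LLM_ARCH_STARCODER;

    // "+ 3" mirrors the check in the diff above: a few non-repeating tensors are
    // offloaded on top of the per-layer ones.
    const bool fully_offloaded = model.n_gpu_layers >= model.n_layer + 3;

    if (has_cublas && full_offload_supported && fully_offloaded) {
        return 1;  // single CPU thread when everything runs on the GPU (assumed behavior)
    }
    return requested_threads;
}

int main() {
    model_info starcoder = { LLM_ARCH_STARCODER, /*n_gpu_layers=*/43, /*n_layer=*/40 };
    printf("threads: %d\n", pick_n_threads(starcoder, 8, /*has_cublas=*/true));  // prints 1
    return 0;
}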