llama : use the same threshold for OpenBLAS and ggml thread limiting (#577)
commit 41318d708e
parent a6956b25a1
@@ -856,7 +856,7 @@ static bool llama_eval_internal(
     // for big prompts, if BLAS is enabled, it is better to use only one thread
     // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
     ggml_cgraph gf = {};
-    gf.n_threads = N > 255 && ggml_cpu_has_blas() ? 1 : n_threads;
+    gf.n_threads = N >= 32 && ggml_cpu_has_blas() ? 1 : n_threads;
 
     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, tokens, N*ggml_element_size(embd));
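For context on why 32 is the shared threshold: ggml routes a matrix multiplication through BLAS only when the involved dimensions are large enough (at least 32), so dropping the eval graph to a single thread for prompts of 32+ tokens lines up with the point where BLAS actually takes over the heavy work and the extra ggml threads would only spin-wait. The sketch below illustrates that relationship; the helper names, the stand-in for ggml_cpu_has_blas(), and the 4096 dimension are assumptions for illustration, not code from this commit.

// Sketch (assumed, not from this diff): how the single-thread heuristic in
// llama_eval_internal lines up with ggml's BLAS gate for mat-muls.
#include <stdbool.h>
#include <stdio.h>

// Assumed stand-in for ggml_cpu_has_blas(); the real function reports
// whether ggml was built against a BLAS backend.
static bool cpu_has_blas(void) { return true; }

// Assumed stand-in for ggml's "is this mat-mul big enough for BLAS?" check:
// all dimensions must reach 32 before the BLAS path is used.
static bool mul_mat_use_blas(int ne0, int ne1, int ne10) {
    return ne0 >= 32 && ne1 >= 32 && ne10 >= 32;
}

int main(void) {
    int n_threads = 8;
    for (int N = 1; N <= 64; N *= 2) {
        // Same shape as the patched line: once the prompt is big enough
        // that BLAS handles the mat-muls, run the graph on one thread.
        int n_eval_threads = (N >= 32 && cpu_has_blas()) ? 1 : n_threads;
        // 4096 is an illustrative hidden dimension, not a value from the commit.
        printf("N=%2d -> n_threads=%d, blas_matmul=%d\n",
               N, n_eval_threads, mul_mat_use_blas(N, 4096, 4096));
    }
    return 0;
}

With the old N > 255 cutoff, prompts between 32 and 255 tokens already went through BLAS for their mat-muls, yet llama_eval_internal still spun up the full thread pool; the new N >= 32 cutoff makes the two decisions consistent.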