Mirror of https://github.com/ggerganov/llama.cpp.git
common : better n_gpu_layers assignment
commit 23360b15b6
parent 323a9d3b8c
@@ -702,7 +702,9 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_params
 
     lparams.n_ctx        = params.n_ctx;
     lparams.n_batch      = params.n_batch;
-    lparams.n_gpu_layers = params.n_gpu_layers != -1 ? params.n_gpu_layers : lparams.n_gpu_layers;
+    if (params.n_gpu_layers != -1) {
+        lparams.n_gpu_layers = params.n_gpu_layers;
+    }
     lparams.main_gpu     = params.main_gpu;
     lparams.tensor_split = params.tensor_split;
     lparams.low_vram     = params.low_vram;
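
For context, a minimal stand-alone sketch (not llama.cpp code) of the behaviour this change settles on: the helper name resolve_n_gpu_layers is invented for illustration; cli_value stands in for params.n_gpu_layers (the command-line value) and library_default for lparams.n_gpu_layers as returned by llama_context_default_params(). A value of -1 keeps the library default, any other value overrides it.

    // Hypothetical sketch of the n_gpu_layers assignment logic after this commit.
    #include <cstdio>

    static int resolve_n_gpu_layers(int cli_value, int library_default) {
        int n_gpu_layers = library_default;   // start from the backend default
        if (cli_value != -1) {                // only override when the user set it explicitly
            n_gpu_layers = cli_value;
        }
        return n_gpu_layers;
    }

    int main() {
        printf("%d\n", resolve_n_gpu_layers(-1, 0));  // prints 0: default kept
        printf("%d\n", resolve_n_gpu_layers(35, 0));  // prints 35: explicit value wins
        return 0;
    }

Compared with the removed ternary, which assigned lparams.n_gpu_layers back to itself when the command-line value was -1, the if statement expresses the "only override when explicitly set" intent directly and avoids the redundant self-assignment.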