mirror of https://github.com/ggerganov/llama.cpp.git
Correct free memory and total memory. (#6630)
Co-authored-by: MasterYi <zouxiaoyi@kylinos.cn>
parent 81da18e71c
commit dee7f8d692
@@ -1638,17 +1638,17 @@ static size_t llama_get_device_memory(int device) {
 #if defined(GGML_USE_CUDA)
     size_t total;
     size_t free;
-    ggml_backend_cuda_get_device_memory(device, &total, &free);
+    ggml_backend_cuda_get_device_memory(device, &free, &total);
     return free;
 #elif defined(GGML_USE_SYCL)
     size_t total;
     size_t free;
-    ggml_backend_sycl_get_device_memory(device, &total, &free);
+    ggml_backend_sycl_get_device_memory(device, &free, &total);
     return free;
 #elif defined(GGML_USE_VULKAN)
     size_t total;
     size_t free;
-    ggml_backend_vk_get_device_memory(device, &total, &free);
+    ggml_backend_vk_get_device_memory(device, &free, &total);
     return free;
 #else
     return 1;
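For context, each backend getter writes the free amount through its first out-parameter and the total through its second, so the old call order passed &total where &free was expected and llama_get_device_memory ended up reporting total VRAM as free. A minimal sketch of the corrected usage for the CUDA path (assuming a CUDA build with the ggml-cuda.h header available; everything outside the diff is illustrative):

    // sketch.cpp - query device 0 memory with the corrected argument order
    #include <cstdio>
    #include "ggml-cuda.h"  // declares ggml_backend_cuda_get_device_memory(int, size_t *, size_t *)

    int main() {
        size_t free  = 0;
        size_t total = 0;
        // Correct order: &free first, &total second. Swapping them, as the
        // pre-fix code did, reports total memory where free memory is expected.
        ggml_backend_cuda_get_device_memory(/*device=*/0, &free, &total);
        printf("device 0: %zu bytes free of %zu bytes total\n", free, total);
        return 0;
    }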