mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-24 13:28:50 +01:00
cuda : print message when initialization fails (#5512)
* cuda : print message when initialization fails * use CUDA_NAME both times
This commit is contained in:
parent
9350a1cf21
commit
9060a1e9df
@@ -7943,6 +7943,7 @@ GGML_CALL void ggml_init_cublas() {
    if (cudaGetDeviceCount(&g_device_count) != cudaSuccess) {
        initialized = true;
        g_cublas_loaded = false;
        fprintf(stderr, "%s: no " GGML_CUDA_NAME " devices found, " GGML_CUDA_NAME " will be disabled\n", __func__);
        return;
    }
Loading…
Reference in New Issue
Block a user