Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-24 13:28:50 +01:00)
make : add optional CUDA_NATIVE_ARCH (#2482)
If the environment variable `CUDA_NATIVE_ARCH` is present, use it to set the NVCC `-arch` flag. Otherwise, use `native`.
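In practice this means a build invoked as, say, `CUDA_NATIVE_ARCH=sm_86 make LLAMA_CUBLAS=1` ends up passing `-arch=sm_86` to nvcc, while the same build without the variable keeps `-arch=native`. The `sm_86` value and the `LLAMA_CUBLAS=1` option are illustrative assumptions about the Makefile of this period; a standalone sketch of the selection logic follows the diff below.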
commit 96981f37b1
parent 438c2ca830
Makefile: 5 changed lines (4 additions, 1 deletion)
@@ -391,9 +391,12 @@ else
 endif #LLAMA_CUDA_NVCC
 ifdef CUDA_DOCKER_ARCH
 	NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH)
+endif # CUDA_DOCKER_ARCH
+ifdef CUDA_NATIVE_ARCH
+	NVCCFLAGS += -arch=$(CUDA_NATIVE_ARCH)
 else
 	NVCCFLAGS += -arch=native
-endif # CUDA_DOCKER_ARCH
+endif # CUDA_NATIVE_ARCH
 ifdef LLAMA_CUDA_FORCE_DMMV
 	NVCCFLAGS += -DGGML_CUDA_FORCE_DMMV
 endif # LLAMA_CUDA_FORCE_DMMV
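As a minimal sketch (not part of the commit), the arch-selection logic after this change can be pulled into a standalone Makefile and checked with plain GNU Make. The variable names and flags match the diff above; the file itself, the `$(info ...)` line, and the empty `all` target are illustrative additions so the logic can be exercised without a CUDA toolchain.

# Sketch of the arch selection above, standalone so it can be run with
# `make -f arch-sketch.mk` (file name assumed); it prints the resolved
# flags instead of invoking nvcc.
NVCCFLAGS :=

ifdef CUDA_DOCKER_ARCH
	NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH)
endif # CUDA_DOCKER_ARCH
ifdef CUDA_NATIVE_ARCH
	NVCCFLAGS += -arch=$(CUDA_NATIVE_ARCH)
else
	NVCCFLAGS += -arch=native
endif # CUDA_NATIVE_ARCH

# Report the resolved flags at parse time; no recipes run.
$(info NVCCFLAGS = $(NVCCFLAGS))

all: ;

Running `CUDA_NATIVE_ARCH=sm_86 make -f arch-sketch.mk` prints `-arch=sm_86`, while `make -f arch-sketch.mk` alone prints `-arch=native`, mirroring the fallback described in the commit message.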