Revert "make : add optional CUDA_NATIVE_ARCH (#2482)"
This reverts commit 96981f37b1.

See: https://github.com/ggerganov/llama.cpp/pull/2482#issuecomment-1775975866
parent 9d02956443
commit e3932593d4
Makefile | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
@@ -391,12 +391,9 @@ else
 endif #LLAMA_CUDA_NVCC
 ifdef CUDA_DOCKER_ARCH
 	NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH)
-endif # CUDA_DOCKER_ARCH
-ifdef CUDA_NATIVE_ARCH
-	NVCCFLAGS += -arch=$(CUDA_NATIVE_ARCH)
 else
 	NVCCFLAGS += -arch=native
-endif # CUDA_NATIVE_ARCH
+endif # CUDA_DOCKER_ARCH
 ifdef LLAMA_CUDA_FORCE_DMMV
 	NVCCFLAGS += -DGGML_CUDA_FORCE_DMMV
 endif # LLAMA_CUDA_FORCE_DMMV
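For context, a minimal sketch of how architecture selection behaves after this revert. The LLAMA_CUBLAS flag and the sm_86 value below are illustrative assumptions about a typical build of that era, not part of this commit:

# Default build: CUDA_DOCKER_ARCH is unset, so the Makefile falls through to
# the else branch and nvcc is invoked with -arch=native, targeting whatever
# GPU is present on the build machine.
make LLAMA_CUBLAS=1

# Building where -arch=native cannot probe the target GPU (e.g. inside a
# Docker image): pass an explicit architecture so the ifdef CUDA_DOCKER_ARCH
# branch is taken instead (sm_86 is an illustrative value).
make LLAMA_CUBLAS=1 CUDA_DOCKER_ARCH=sm_86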