Revert "make : add optional CUDA_NATIVE_ARCH (#2482)"
author    Georgi Gerganov <redacted>
          Mon, 23 Oct 2023 20:46:05 +0000 (23:46 +0300)
committer Georgi Gerganov <redacted>
          Mon, 23 Oct 2023 20:46:05 +0000 (23:46 +0300)
This reverts commit 96981f37b1e3f450d9e63e571514217bf60f0a7f.

See:

https://github.com/ggerganov/llama.cpp/pull/2482#issuecomment-1775975866

Makefile

index 705c4acb4a0a98b6874eabe734671595e483d202..80179631f95a5b25780ade471adf3e883fc69f06 100644
--- a/Makefile
+++ b/Makefile
@@ -391,12 +391,9 @@ else
 endif #LLAMA_CUDA_NVCC
 ifdef CUDA_DOCKER_ARCH
        NVCCFLAGS += -Wno-deprecated-gpu-targets -arch=$(CUDA_DOCKER_ARCH)
-endif # CUDA_DOCKER_ARCH
-ifdef CUDA_NATIVE_ARCH
-       NVCCFLAGS += -arch=$(CUDA_NATIVE_ARCH)
 else
        NVCCFLAGS += -arch=native
-endif # CUDA_NATIVE_ARCH
+endif # CUDA_DOCKER_ARCH
 ifdef LLAMA_CUDA_FORCE_DMMV
        NVCCFLAGS += -DGGML_CUDA_FORCE_DMMV
 endif # LLAMA_CUDA_FORCE_DMMV
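
After the revert, the Makefile selects the GPU architecture in two ways only: -arch=$(CUDA_DOCKER_ARCH) when CUDA_DOCKER_ARCH is set, otherwise -arch=native. A minimal sketch of how a build might be invoked under each path, assuming the LLAMA_CUBLAS build flag used by the Makefile of this era; the sm_80 value is an example only (pick the compute capability that matches your GPU):

    # Default path: nvcc targets the GPU present on the build machine (-arch=native)
    make LLAMA_CUBLAS=1

    # Container/cross-build path: pin the architecture explicitly via CUDA_DOCKER_ARCH
    make LLAMA_CUBLAS=1 CUDA_DOCKER_ARCH=sm_80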