git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
nix : enable curl (#8043)
author: Michael Francis <redacted>
Mon, 1 Jul 2024 11:47:04 +0000 (07:47 -0400)
committer: GitHub <redacted>
Mon, 1 Jul 2024 11:47:04 +0000 (14:47 +0300)
Co-authored-by: Georgi Gerganov <redacted>
.devops/nix/package.nix

index b75d7ff9e5bab507b546fb4cd7494925b7426fb3..49e9b75287b3380b6015724dc561f212125f269a 100644 (file)
@@ -17,6 +17,7 @@
   rocmPackages,
   vulkan-headers,
   vulkan-loader,
+  curl,
   useBlas ? builtins.all (x: !x) [
     useCuda
     useMetalKit
@@ -27,6 +28,7 @@
   useMetalKit ? stdenv.isAarch64 && stdenv.isDarwin,
   useMpi ? false, # Increases the runtime closure size by ~700M
   useRocm ? config.rocmSupport,
+  enableCurl ? true,
   useVulkan ? false,
   llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake
 
@@ -196,13 +198,15 @@ effectiveStdenv.mkDerivation (
       ++ optionals useMpi [ mpi ]
       ++ optionals useRocm rocmBuildInputs
       ++ optionals useBlas [ blas ]
-      ++ optionals useVulkan vulkanBuildInputs;
+      ++ optionals useVulkan vulkanBuildInputs
+      ++ optionals enableCurl [ curl ];
 
     cmakeFlags =
       [
         (cmakeBool "LLAMA_BUILD_SERVER" true)
         (cmakeBool "BUILD_SHARED_LIBS" (!enableStatic))
         (cmakeBool "CMAKE_SKIP_BUILD_RPATH" true)
+        (cmakeBool "LLAMA_CURL" enableCurl)
         (cmakeBool "GGML_NATIVE" false)
         (cmakeBool "GGML_BLAS" useBlas)
         (cmakeBool "GGML_CUDA" useCuda)