flake : support `nix build '.#opencl'` (#2337)
author    wzy <redacted>
          Sun, 23 Jul 2023 11:57:02 +0000 (19:57 +0800)
committer GitHub <redacted>
          Sun, 23 Jul 2023 11:57:02 +0000 (14:57 +0300)
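
This change adds a second flake output so the CLBlast (OpenCL) variant can be built alongside the default package. A minimal usage sketch, assuming Nix with flakes enabled and a checkout of this repository:

    nix build                # default package
    nix build '.#opencl'     # CLBlast-enabled build, as named in the commit title
    ./result/bin/llama --help   # both install the main binary as `llama` (see postInstall / meta.mainProgram)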
flake.nix

index 7f148f1444bcd39046e6b4f3fe793c0f71e4faf6..4178e97ffc5d0273674f7b104a078d18754647eb 100644 (file)
--- a/flake.nix
+++ b/flake.nix
@@ -7,7 +7,8 @@
     flake-utils.lib.eachDefaultSystem (system:
       let
         inherit (pkgs.stdenv) isAarch32 isAarch64 isDarwin;
-        osSpecific = with pkgs; [ openmpi ] ++
+        buildInputs = with pkgs; [ openmpi ];
+        osSpecific = with pkgs; buildInputs ++
         (
           if isAarch64 && isDarwin then
             with pkgs.darwin.apple_sdk_11_0.frameworks; [
         nativeBuildInputs = with pkgs; [ cmake pkgconfig ];
         llama-python =
           pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece ]);
+        postPatch = ''
+          substituteInPlace ./ggml-metal.m \
+            --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
+          substituteInPlace ./*.py --replace '/usr/bin/env python' '${llama-python}/bin/python'
+        '';
+        postInstall = ''
+          mv $out/bin/main $out/bin/llama
+          mv $out/bin/server $out/bin/llama-server
+        '';
+        cmakeFlags = [ "-DLLAMA_BUILD_SERVER=ON" "-DLLAMA_MPI=ON" "-DBUILD_SHARED_LIBS=ON" "-DCMAKE_SKIP_BUILD_RPATH=ON" ];
       in {
         packages.default = pkgs.stdenv.mkDerivation {
           name = "llama.cpp";
           src = ./.;
-          postPatch = ''
-            substituteInPlace ./ggml-metal.m \
-              --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
-            substituteInPlace ./*.py --replace '/usr/bin/env python' '${llama-python}/bin/python'
-          '';
+          postPatch = postPatch;
           nativeBuildInputs = nativeBuildInputs;
           buildInputs = osSpecific;
-          cmakeFlags = [ "-DLLAMA_BUILD_SERVER=ON" "-DLLAMA_MPI=ON" "-DBUILD_SHARED_LIBS=ON" "-DCMAKE_SKIP_BUILD_RPATH=ON" ]
+          cmakeFlags = cmakeFlags
             ++ (if isAarch64 && isDarwin then [
               "-DCMAKE_C_FLAGS=-D__ARM_FEATURE_DOTPROD=1"
               "-DLLAMA_METAL=ON"
               "-DLLAMA_BLAS=ON"
               "-DLLAMA_BLAS_VENDOR=OpenBLAS"
           ]);
-          postInstall = ''
-            mv $out/bin/main $out/bin/llama
-            mv $out/bin/server $out/bin/llama-server
-          '';
+          postInstall = postInstall;
+          meta.mainProgram = "llama";
+        };
+        packages.opencl = pkgs.stdenv.mkDerivation {
+          name = "llama.cpp";
+          src = ./.;
+          postPatch = postPatch;
+          nativeBuildInputs = nativeBuildInputs;
+          buildInputs = with pkgs; buildInputs ++ [ clblast ];
+          cmakeFlags = cmakeFlags ++ [
+            "-DLLAMA_CLBLAST=ON"
+          ];
+          postInstall = postInstall;
           meta.mainProgram = "llama";
         };
         apps.llama-server = {
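
The refactor above hoists the attributes shared by both derivations (postPatch, postInstall, cmakeFlags, buildInputs) into the surrounding `let`, so each package only states its delta. A minimal sketch of that pattern in isolation; the input URLs and the trimmed attribute set are illustrative, not the actual flake:

    {
      inputs = {
        nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
        flake-utils.url = "github:numtide/flake-utils";
      };
      outputs = { self, nixpkgs, flake-utils }:
        flake-utils.lib.eachDefaultSystem (system:
          let
            pkgs = import nixpkgs { inherit system; };
            # attributes shared by every package variant
            buildInputs = with pkgs; [ openmpi ];
            nativeBuildInputs = with pkgs; [ cmake pkgconfig ];
            cmakeFlags = [ "-DLLAMA_BUILD_SERVER=ON" ];
          in {
            # default variant: uses the shared attributes as-is
            packages.default = pkgs.stdenv.mkDerivation {
              name = "llama.cpp";
              src = ./.;
              inherit nativeBuildInputs cmakeFlags;
              buildInputs = buildInputs;
            };
            # OpenCL variant: extends the shared lists instead of repeating them
            packages.opencl = pkgs.stdenv.mkDerivation {
              name = "llama.cpp";
              src = ./.;
              inherit nativeBuildInputs;
              buildInputs = buildInputs ++ [ pkgs.clblast ];
              cmakeFlags = cmakeFlags ++ [ "-DLLAMA_CLBLAST=ON" ];
            };
          });
    }

Because mkDerivation's argument set is not recursive, `buildInputs = buildInputs ++ ...` on the right-hand side resolves to the `let` binding, which is why the commit can write `postPatch = postPatch;` and `cmakeFlags = cmakeFlags ++ [ ... ];` without infinite recursion.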