flake : fix ggml-metal.metal path and run nixfmt (#1974)
author    Rowan Hart <redacted>
          Sat, 24 Jun 2023 11:07:08 +0000 (04:07 -0700)
committer GitHub <redacted>
          Sat, 24 Jun 2023 11:07:08 +0000 (14:07 +0300)
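Context for the fix, as a sketch rather than part of the commit itself: the Metal backend loads its shader source at run time, and upstream ggml-metal.m resolves it through the app bundle. The postPatch step in the diff below rewrites that lookup into an absolute Nix store path; this commit adds the missing /bin component, presumably because the install phase places ggml-metal.metal next to the binaries in $out/bin. Assuming the surrounding assignment in ggml-metal.m (not shown in this diff) and an illustrative store hash, the substitution has this effect:

    // line matched by --replace in ggml-metal.m (surrounding assignment assumed):
    NSString * path = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
    // after postPatch, with $out expanded by the Nix build (store hash illustrative):
    NSString * path = @"/nix/store/<hash>-llama.cpp/bin/ggml-metal.metal";

The remaining hunks are mechanical reformatting from running nixfmt, as the commit subject notes.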
flake.nix

index bba3d71f7437b9b8cd0c8095dcf2d7cd8d5ddce5..cebb47b94c92e47e69296da8d169889ef4d371d6 100644 (file)
--- a/flake.nix
+++ b/flake.nix
@@ -9,27 +9,33 @@
         inherit (pkgs.stdenv) isAarch64 isDarwin;
         inherit (pkgs.lib) optionals;
         isM1 = isAarch64 && isDarwin;
-        osSpecific =
-          if isM1 then with pkgs.darwin.apple_sdk_11_0.frameworks; [ Accelerate MetalKit MetalPerformanceShaders MetalPerformanceShadersGraph ]
-          else if isDarwin then with pkgs.darwin.apple_sdk.frameworks; [ Accelerate CoreGraphics CoreVideo ]
-          else [ ];
-        pkgs = import nixpkgs {
-          inherit system;
-        };
-        llama-python = pkgs.python310.withPackages (ps: with ps; [
-          numpy
-          sentencepiece
-        ]);
-      in
-      {
+        osSpecific = if isM1 then
+          with pkgs.darwin.apple_sdk_11_0.frameworks; [
+            Accelerate
+            MetalKit
+            MetalPerformanceShaders
+            MetalPerformanceShadersGraph
+          ]
+        else if isDarwin then
+          with pkgs.darwin.apple_sdk.frameworks; [
+            Accelerate
+            CoreGraphics
+            CoreVideo
+          ]
+        else
+          [ ];
+        pkgs = import nixpkgs { inherit system; };
+        llama-python =
+          pkgs.python310.withPackages (ps: with ps; [ numpy sentencepiece ]);
+      in {
         packages.default = pkgs.stdenv.mkDerivation {
           name = "llama.cpp";
           src = ./.;
-          postPatch =
-            if isM1 then ''
-              substituteInPlace ./ggml-metal.m \
-                --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/ggml-metal.metal\";"
-            '' else "";
+          postPatch = if isM1 then ''
+            substituteInPlace ./ggml-metal.m \
+              --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
+          '' else
+            "";
           nativeBuildInputs = with pkgs; [ cmake ];
           buildInputs = osSpecific;
           cmakeFlags = [ "-DLLAMA_BUILD_SERVER=ON" ] ++ (optionals isM1 [
         };
         apps.default = self.apps.${system}.llama;
         devShells.default = pkgs.mkShell {
-          packages = with pkgs; [
-            cmake
-            llama-python
-          ] ++ osSpecific;
+          packages = with pkgs; [ cmake llama-python ] ++ osSpecific;
         };
-      }
-    );
+      });
 }
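A hypothetical way to verify the change after it lands: build the flake with nix build and confirm that result/bin/ggml-metal.metal exists next to the installed binaries, so the substituted absolute path resolves when the Metal backend initializes.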