git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
flake : remove intel mkl from flake.nix due to missing files (#2277)
author: wzy <redacted>
Fri, 21 Jul 2023 10:26:34 +0000 (18:26 +0800)
committer: GitHub <redacted>
Fri, 21 Jul 2023 10:26:34 +0000 (13:26 +0300)
NixOS's mkl misses some libraries like mkl-sdl.pc. See #2261
Currently NixOS doesn't have intel C compiler (icx, icpx). See https://discourse.nixos.org/t/packaging-intel-math-kernel-libraries-mkl/975
So remove it from flake.nix

Some minor changes:

- Change pkgs.python310 to pkgs.python3 to track the latest Python 3 release
- Add pkgconfig to devShells.default
- Remove installPhase because we have `cmake --install` from #2256

CMakeLists.txt
README.md
flake.nix

index 1693327674f8071859fa8f8dd20f8b3ec378816c..abc96814d632df0bdf66ffdce4cb6164b99c9c0f 100644 (file)
@@ -186,16 +186,7 @@ if (LLAMA_BLAS)
                 pkg_check_modules(DepBLAS REQUIRED flexiblas_api)
             elseif (${LLAMA_BLAS_VENDOR} MATCHES "Intel")
                 # all Intel* libraries share the same include path
-                pkg_check_modules(DepBLAS mkl-sdl)
-                if (NOT DepBLAS)
-                    if (BUILD_SHARED_LIBS)
-                        set(LINK_METHOD dynamic)
-                    else()
-                        set(LINK_METHOD static)
-                    endif()
-                    string(REGEX REPLACE ".*_" "" DATA_TYPE_MODEL ${LLAMA_BLAS_VENDOR})
-                    pkg_check_modules(DepBLAS REQUIRED mkl-${LINK_METHOD}-${DATA_TYPE_MODEL}-iomp)
-                endif()
+                pkg_check_modules(DepBLAS REQUIRED mkl-sdl)
             elseif (${LLAMA_BLAS_VENDOR} MATCHES "NVHPC")
                 # this doesn't provide pkg-config
                 # suggest to assign BLAS_INCLUDE_DIRS on your own
index 073b621e94cd94dea7aae8e8dcdf08f6f665f70d..f45e4bf0849f06b4f7dbbb0eb4e79e4bf9a42f2e 100644 (file)
--- a/README.md
+++ b/README.md
@@ -360,7 +360,7 @@ Building the program with BLAS support may lead to some performance improvements
   ```bash
   mkdir build
   cd build
-  cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_lp64 -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
+  cmake .. -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
   cmake --build . --config Release
   ```
 
index 5657e825880410da2253ee7d34c71520e27c0f8a..7f148f1444bcd39046e6b4f3fe793c0f71e4faf6 100644 (file)
--- a/flake.nix
+++ b/flake.nix
@@ -6,7 +6,7 @@
   outputs = { self, nixpkgs, flake-utils }:
     flake-utils.lib.eachDefaultSystem (system:
       let
-        inherit (pkgs.stdenv) isAarch32 isAarch64 isx86_32 isx86_64 isDarwin;
+        inherit (pkgs.stdenv) isAarch32 isAarch64 isDarwin;
         osSpecific = with pkgs; [ openmpi ] ++
         (
           if isAarch64 && isDarwin then
               CoreGraphics
               CoreVideo
             ]
-          else if isx86_32 || isx86_64 then
-            with pkgs; [ mkl ]
           else
             with pkgs; [ openblas ]
         );
         pkgs = import nixpkgs { inherit system; };
+        nativeBuildInputs = with pkgs; [ cmake pkgconfig ];
         llama-python =
-          pkgs.python310.withPackages (ps: with ps; [ numpy sentencepiece ]);
+          pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece ]);
       in {
         packages.default = pkgs.stdenv.mkDerivation {
           name = "llama.cpp";
           postPatch = ''
             substituteInPlace ./ggml-metal.m \
               --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
+            substituteInPlace ./*.py --replace '/usr/bin/env python' '${llama-python}/bin/python'
           '';
-          nativeBuildInputs = with pkgs; [ cmake pkgconfig ];
+          nativeBuildInputs = nativeBuildInputs;
           buildInputs = osSpecific;
           cmakeFlags = [ "-DLLAMA_BUILD_SERVER=ON" "-DLLAMA_MPI=ON" "-DBUILD_SHARED_LIBS=ON" "-DCMAKE_SKIP_BUILD_RPATH=ON" ]
             ++ (if isAarch64 && isDarwin then [
               "-DCMAKE_C_FLAGS=-D__ARM_FEATURE_DOTPROD=1"
               "-DLLAMA_METAL=ON"
-            ] else if isx86_32 || isx86_64 then [
-              "-DLLAMA_BLAS=ON"
-              "-DLLAMA_BLAS_VENDOR=Intel10_lp64"
             ] else [
               "-DLLAMA_BLAS=ON"
               "-DLLAMA_BLAS_VENDOR=OpenBLAS"
           ]);
-          installPhase = ''
-            runHook preInstall
-
-            install -D bin/* -t $out/bin
-            install -Dm644 lib*.so -t $out/lib
+          postInstall = ''
             mv $out/bin/main $out/bin/llama
             mv $out/bin/server $out/bin/llama-server
-
-            echo "#!${llama-python}/bin/python" > $out/bin/convert.py
-            cat ${./convert.py} >> $out/bin/convert.py
-            chmod +x $out/bin/convert.py
-
-            runHook postInstall
           '';
           meta.mainProgram = "llama";
         };
@@ -81,7 +68,7 @@
         };
         apps.default = self.apps.${system}.llama;
         devShells.default = pkgs.mkShell {
-          packages = with pkgs; [ cmake llama-python ] ++ osSpecific;
+          packages = nativeBuildInputs ++ osSpecific;
         };
       });
 }