pkg_check_modules(DepBLAS REQUIRED flexiblas_api)
elseif (${LLAMA_BLAS_VENDOR} MATCHES "Intel")
# all Intel* libraries share the same include path
- pkg_check_modules(DepBLAS mkl-sdl)
- if (NOT DepBLAS)
- if (BUILD_SHARED_LIBS)
- set(LINK_METHOD dynamic)
- else()
- set(LINK_METHOD static)
- endif()
- string(REGEX REPLACE ".*_" "" DATA_TYPE_MODEL ${LLAMA_BLAS_VENDOR})
- pkg_check_modules(DepBLAS REQUIRED mkl-${LINK_METHOD}-${DATA_TYPE_MODEL}-iomp)
- endif()
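+ # mkl-sdl is the single-dynamic-library interface (libmkl_rt); it selects the
+ # interface and threading layers at runtime, so one module replaces the
+ # per-linkage / per-ABI variants above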
+ pkg_check_modules(DepBLAS REQUIRED mkl-sdl)
elseif (${LLAMA_BLAS_VENDOR} MATCHES "NVHPC")
# NVHPC doesn't provide pkg-config files
# you may need to set BLAS_INCLUDE_DIRS yourself
outputs = { self, nixpkgs, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let
- inherit (pkgs.stdenv) isAarch32 isAarch64 isx86_32 isx86_64 isDarwin;
+ inherit (pkgs.stdenv) isAarch32 isAarch64 isDarwin;
osSpecific = with pkgs; [ openmpi ] ++
(
if isAarch64 && isDarwin then
  with pkgs.darwin.apple_sdk_11_0.frameworks; [ Accelerate MetalKit MetalPerformanceShaders MetalPerformanceShadersGraph ]
else if isAarch32 && isDarwin then
  with pkgs.darwin.apple_sdk.frameworks; [
    Accelerate
    CoreGraphics
    CoreVideo
  ]
- else if isx86_32 || isx86_64 then
- with pkgs; [ mkl ]
else
with pkgs; [ openblas ]
);
pkgs = import nixpkgs { inherit system; };
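+ # build-time tools shared by the package derivation and the dev shell below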
+ nativeBuildInputs = with pkgs; [ cmake pkgconfig ];
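+ # python environment used by the conversion scripts (see postPatch below)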
llama-python =
- pkgs.python310.withPackages (ps: with ps; [ numpy sentencepiece ]);
+ pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece ]);
in {
packages.default = pkgs.stdenv.mkDerivation {
name = "llama.cpp";
postPatch = ''
substituteInPlace ./ggml-metal.m \
--replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
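+ # point the python scripts at the pinned interpreter instead of /usr/bin/env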
+ substituteInPlace ./*.py --replace '/usr/bin/env python' '${llama-python}/bin/python'
'';
- nativeBuildInputs = with pkgs; [ cmake pkgconfig ];
+ inherit nativeBuildInputs;
buildInputs = osSpecific;
cmakeFlags = [ "-DLLAMA_BUILD_SERVER=ON" "-DLLAMA_MPI=ON" "-DBUILD_SHARED_LIBS=ON" "-DCMAKE_SKIP_BUILD_RPATH=ON" ]
++ (if isAarch64 && isDarwin then [
"-DCMAKE_C_FLAGS=-D__ARM_FEATURE_DOTPROD=1"
"-DLLAMA_METAL=ON"
- ] else if isx86_32 || isx86_64 then [
- "-DLLAMA_BLAS=ON"
- "-DLLAMA_BLAS_VENDOR=Intel10_lp64"
] else [
"-DLLAMA_BLAS=ON"
"-DLLAMA_BLAS_VENDOR=OpenBLAS"
]);
- installPhase = ''
- runHook preInstall
-
- install -D bin/* -t $out/bin
- install -Dm644 lib*.so -t $out/lib
+ postInstall = ''
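+ # cmake's default install step already copies the binaries; only rename them here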
mv $out/bin/main $out/bin/llama
mv $out/bin/server $out/bin/llama-server
-
- echo "#!${llama-python}/bin/python" > $out/bin/convert.py
- cat ${./convert.py} >> $out/bin/convert.py
- chmod +x $out/bin/convert.py
-
- runHook postInstall
'';
meta.mainProgram = "llama";
};
};
apps.default = self.apps.${system}.llama;
devShells.default = pkgs.mkShell {
- packages = with pkgs; [ cmake llama-python ] ++ osSpecific;
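+ # reuse the pinned build tooling instead of listing the packages twice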
+ packages = nativeBuildInputs ++ osSpecific;
};
});
}