* flake : update flake.lock for a newer transformers version + provide an extra dev shell with torch and transformers (for most convert-xxx.py scripts)
diff --git a/flake.lock b/flake.lock
@@ ... @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1692913444,
-        "narHash": "sha256-1SvMQm2DwofNxXVtNWWtIcTh7GctEVrS/Xel/mdc6iY=",
+        "lastModified": 1698134075,
+        "narHash": "sha256-foCD+nuKzfh49bIoiCBur4+Fx1nozo+4C/6k8BYk4sg=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "18324978d632ffc55ef1d928e81630c620f4f447",
+        "rev": "8efd5d1e283604f75a808a20e6cde0ef313d07d4",
         "type": "github"
       },
       "original": {
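The lockfile bump itself is mechanical. Assuming the standard flakes CLI (the commit does not record the exact invocation used), either of the following regenerates the rev/narHash pair shown above:

    $ nix flake lock --update-input nixpkgs   # refresh only the nixpkgs input
    $ nix flake update                        # refresh all inputs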
diff --git a/flake.nix b/flake.nix
@@ ... @@
         llama-python =
           pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece ]);
+        # TODO(Green-Sky): find a better way to opt-into the heavy ml python runtime
+        llama-python-extra =
+          pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece torchWithoutCuda transformers ]);
         postPatch = ''
           substituteInPlace ./ggml-metal.m \
             --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
@@ ... @@
           buildInputs = [ llama-python ];
           packages = nativeBuildInputs ++ osSpecific;
         };
+        devShells.extra = pkgs.mkShell {
+          buildInputs = [ llama-python-extra ];
+          packages = nativeBuildInputs ++ osSpecific;
+        };
       });
 }
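A usage sketch for the new shell (assuming the flake is consumed from the repo root; `convert.py` here stands in for whichever convert-xxx.py script is needed):

    $ nix develop .#extra                       # devShells.extra from the hunk above
    $ python3 -c 'import torch, transformers'   # both resolve from llama-python-extra
    $ python3 convert.py --help

The default `nix develop` shell still carries only numpy and sentencepiece, so the heavy torch closure (CPU-only here, via torchWithoutCuda) is fetched only when the extra shell is explicitly requested.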