flake.nix: suggest the binary caches
author    Someone Serge <redacted>
Sat, 30 Dec 2023 18:25:25 +0000 (18:25 +0000)
committer Philip Taron <redacted>
Sun, 31 Dec 2023 21:14:58 +0000 (13:14 -0800)
flake.nix

index 8d0f095d71d6d006a36eb6591eebc52252619135..488ed6c59d9637444f8226cbe608d37c24cd1a24 100644
--- a/flake.nix
+++ b/flake.nix
@@ -6,6 +6,29 @@
     flake-parts.url = "github:hercules-ci/flake-parts";
   };
 
+  # Optional binary cache
+  nixConfig = {
+    extra-substituters = [
+      # Populated by the CI in ggerganov/llama.cpp
+      "https://llama-cpp.cachix.org"
+
+      # A development cache for nixpkgs imported with `config.cudaSupport = true`.
+      # Populated by https://hercules-ci.com/github/SomeoneSerge/nixpkgs-cuda-ci.
+      # This lets one skip building e.g. the CUDA-enabled openmpi.
+      # TODO: Replace once nix-community obtains an official one.
+      "https://cuda-maintainers.cachix.org"
+    ];
+
+    # Verify these are the same keys as published on
+    # - https://app.cachix.org/cache/llama-cpp
+    # - https://app.cachix.org/cache/cuda-maintainers
+    extra-trusted-public-keys = [
+      "llama-cpp.cachix.org-1:H75X+w83wUKTIPSO1KWy9ADUrzThyGs8P5tmAbkWhQc="
+      "cuda-maintainers.cachix.org-1:0dq3bujKpuEPMCX6U4WylrUDZ9JyUG0VpVZa7CNfq5E="
+    ];
+  };
+
+
   # For inspection, use `nix flake show github:ggerganov/llama.cpp` or the nix repl:
   #
   # ```bash
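Usage note (not part of the commit): a flake-level nixConfig like the one added above is only applied when the invoking user accepts it, and for a multi-user install the daemon only honors extra substituters for trusted users or caches allow-listed in trusted-substituters. A minimal sketch of consuming these caches, assuming a flakes-enabled Nix and that `.#default` is the attribute being built:

  # Accept this flake's nixConfig (the substituters and keys above) for
  # this invocation; without the flag, Nix prompts interactively.
  nix build .#default --accept-flake-config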