git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Install rpc-server when GGML_RPC is ON. (#17149)
author    Nicolas B. Pierron <redacted>
          Tue, 11 Nov 2025 10:53:59 +0000 (11:53 +0100)
committer GitHub <redacted>
          Tue, 11 Nov 2025 10:53:59 +0000 (10:53 +0000)
.devops/nix/package.nix
tools/rpc/CMakeLists.txt

diff --git a/.devops/nix/package.nix b/.devops/nix/package.nix
index 41748e89d5cd5f43addb246b0551a3f61b56e239..a13996bd68da1fe19caebcdf8551a3b9b8da2810 100644
--- a/.devops/nix/package.nix
+++ b/.devops/nix/package.nix
@@ -34,6 +34,7 @@
   rocmGpuTargets ? builtins.concatStringsSep ";" rocmPackages.clr.gpuTargets,
   enableCurl ? true,
   useVulkan ? false,
+  useRpc ? false,
   llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake
 
   # It's necessary to consistently use backendStdenv when building with CUDA support,
@@ -175,6 +176,7 @@ effectiveStdenv.mkDerivation (finalAttrs: {
       (cmakeBool "GGML_METAL" useMetalKit)
       (cmakeBool "GGML_VULKAN" useVulkan)
       (cmakeBool "GGML_STATIC" enableStatic)
+      (cmakeBool "GGML_RPC" useRpc)
     ]
     ++ optionals useCuda [
       (
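
Downstream, the new `useRpc` argument can be flipped on with an ordinary Nix override along these lines (a minimal sketch; the attribute name `llama-cpp` is an assumption for illustration, not part of this commit):

    # hypothetical override; `llama-cpp` stands in for whatever attribute
    # the flake exposes for this package
    llama-cpp.override { useRpc = true; }
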
diff --git a/tools/rpc/CMakeLists.txt b/tools/rpc/CMakeLists.txt
index c2c748148645e66aeeae90066f07f56665bf1701..20f114ad9bae2c9e745e9d9b9e754230f5365a35 100644
--- a/tools/rpc/CMakeLists.txt
+++ b/tools/rpc/CMakeLists.txt
@@ -2,3 +2,7 @@ set(TARGET rpc-server)
 add_executable(${TARGET} rpc-server.cpp)
 target_link_libraries(${TARGET} PRIVATE ggml)
 target_compile_features(${TARGET} PRIVATE cxx_std_17)
+
+if(LLAMA_TOOLS_INSTALL)
+    install(TARGETS ${TARGET} RUNTIME)
+endif()
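
With both switches on, a build along these lines should place rpc-server in the install prefix (a sketch assuming the top-level build only descends into tools/rpc when GGML_RPC is ON, as the commit title implies):

    # configure with RPC support and tool installation enabled
    cmake -B build -DGGML_RPC=ON -DLLAMA_TOOLS_INSTALL=ON
    cmake --build build --target rpc-server
    # install(TARGETS ... RUNTIME) defaults to <prefix>/bin
    cmake --install build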