From: Nicolas B. Pierron
Date: Tue, 11 Nov 2025 10:53:59 +0000 (+0100)
Subject: Install rpc-server when GGML_RPC is ON. (#17149)
X-Git-Tag: upstream/0.0.7446~426
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=d2d626938aa7b0137df6a808e0637151806a9d5a;p=pkg%2Fggml%2Fsources%2Fllama.cpp

Install rpc-server when GGML_RPC is ON. (#17149)
---

diff --git a/.devops/nix/package.nix b/.devops/nix/package.nix
index 41748e89..a13996bd 100644
--- a/.devops/nix/package.nix
+++ b/.devops/nix/package.nix
@@ -34,6 +34,7 @@
   rocmGpuTargets ? builtins.concatStringsSep ";" rocmPackages.clr.gpuTargets,
   enableCurl ? true,
   useVulkan ? false,
+  useRpc ? false,
   llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake

   # It's necessary to consistently use backendStdenv when building with CUDA support,
@@ -175,6 +176,7 @@ effectiveStdenv.mkDerivation (finalAttrs: {
         (cmakeBool "GGML_METAL" useMetalKit)
         (cmakeBool "GGML_VULKAN" useVulkan)
         (cmakeBool "GGML_STATIC" enableStatic)
+        (cmakeBool "GGML_RPC" useRpc)
       ]
       ++ optionals useCuda [
         (
diff --git a/tools/rpc/CMakeLists.txt b/tools/rpc/CMakeLists.txt
index c2c74814..20f114ad 100644
--- a/tools/rpc/CMakeLists.txt
+++ b/tools/rpc/CMakeLists.txt
@@ -2,3 +2,7 @@ set(TARGET rpc-server)
 add_executable(${TARGET} rpc-server.cpp)
 target_link_libraries(${TARGET} PRIVATE ggml)
 target_compile_features(${TARGET} PRIVATE cxx_std_17)
+
+if(LLAMA_TOOLS_INSTALL)
+    install(TARGETS ${TARGET} RUNTIME)
+endif()
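
Usage note: the new install rule only fires when both GGML_RPC and
LLAMA_TOOLS_INSTALL are enabled (the Nix path sets GGML_RPC via the new
useRpc argument). A minimal sketch of a native CMake build that exercises
the rule follows; both flag names are taken from the diff above, while the
build directory and install prefix are arbitrary choices for illustration:

    # Configure with the RPC backend and tool installation enabled.
    cmake -B build -DGGML_RPC=ON -DLLAMA_TOOLS_INSTALL=ON
    # Build the rpc-server target, then install it; the binary should
    # land under <prefix>/bin per the RUNTIME destination default.
    cmake --build build --target rpc-server
    cmake --install build --prefix "$HOME/.local"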