rpc: fix register position (#11424)
author    Frank Mai <redacted>
Sun, 26 Jan 2025 15:20:34 +0000 (23:20 +0800)
committer GitHub <redacted>
Sun, 26 Jan 2025 15:20:34 +0000 (16:20 +0100)
Signed-off-by: thxCode <redacted>
src/llama-model.cpp
src/llama.cpp

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 031b4c30b75dddf133d6ecf58966c12c4aa444e7..18bd0b071bb9085fbf4fb0aa90ae06b87df0f5ef 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -1303,10 +1303,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
     const int act_gpu_layers = devices.empty() ? 0 : std::min(n_gpu_layers, (int)n_layer + 1);
     auto get_layer_buft_list = [&](int il) -> llama_model::impl::layer_dev {
         if (il < i_gpu_start || (il - i_gpu_start) >= act_gpu_layers) {
+            LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s\n", il, ggml_backend_dev_name(cpu_dev));
             return {cpu_dev, &pimpl->cpu_buft_list};
         }
         const int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + n_devices(), float(il - i_gpu_start)/act_gpu_layers) - splits.begin();
         auto * dev = devices.at(layer_gpu);
+        LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s\n", il, ggml_backend_dev_name(dev));
         return {dev, &pimpl->gpu_buft_list.at(dev)};
     };
 
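For context on the hunk above: the new debug line reports the device picked by the std::upper_bound lookup over the cumulative split fractions. Below is a minimal standalone sketch of that mapping; the splits values, device names, and layer count are hypothetical, chosen only to illustrate how layers land on devices, not taken from the commit.

    // Sketch of the layer -> device mapping logged above.
    // splits holds cumulative fractions; upper_bound finds the first
    // device whose cumulative share exceeds the layer's relative position.
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
        const std::vector<float> splits = {0.4f, 1.0f};             // hypothetical: 40% / 60%
        const char * names[] = {"CUDA0", "RPC[192.168.0.2:50052]"}; // hypothetical device names
        const int i_gpu_start    = 0;
        const int act_gpu_layers = 10;
        for (int il = i_gpu_start; il < i_gpu_start + act_gpu_layers; ++il) {
            const int layer_gpu = (int)(std::upper_bound(splits.begin(), splits.end(),
                float(il - i_gpu_start) / act_gpu_layers) - splits.begin());
            std::printf("load_tensors: layer %3d assigned to device %s\n", il, names[layer_gpu]);
        }
        return 0;
    }

With these numbers, layers 0-3 go to CUDA0 and layers 4-9 to the RPC device, which is exactly the kind of assignment the new LLAMA_LOG_DEBUG lines make visible.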
diff --git a/src/llama.cpp b/src/llama.cpp
index e8cfe5012819ce1bb5d11f5973c2dfb79b48f95c..094157ccf2aa231e4b41e1b9d9e9e4f7f0a13c52 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -9405,6 +9405,7 @@ static struct llama_model * llama_model_load_from_file_impl(
             model->devices.push_back(*dev);
         }
     } else {
+        std::vector<ggml_backend_dev_t> rpc_servers;
         // use all available devices
         for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
             ggml_backend_dev_t dev = ggml_backend_dev_get(i);
@@ -9415,10 +9416,19 @@ static struct llama_model * llama_model_load_from_file_impl(
                     break;
 
                 case GGML_BACKEND_DEVICE_TYPE_GPU:
-                    model->devices.push_back(dev);
+                    ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
+                    if (ggml_backend_reg_name(reg) == std::string("RPC")) {
+                        rpc_servers.push_back(dev);
+                    } else {
+                        model->devices.push_back(dev);
+                    }
                     break;
             }
         }
+        // add RPC servers at the front of the list
+        if (!rpc_servers.empty()) {
+            model->devices.insert(model->devices.begin(), rpc_servers.begin(), rpc_servers.end());
+        }
     }
 
     // if using single GPU mode, remove all except the main GPU
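The llama.cpp change itself is a reordering fix: during device discovery, RPC devices are now collected separately and spliced in ahead of local GPUs, restoring their front-of-list priority (per the commit title and the inserted comment). A standalone sketch of the same splice, with hypothetical string names standing in for ggml_backend_dev_t handles:

    // Sketch of the register-position fix: gather RPC entries during
    // discovery, then insert them at the front of the device list.
    // The device names are hypothetical placeholders.
    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
        const std::vector<std::string> discovered = {
            "CUDA0", "RPC[host-a:50052]", "CUDA1", "RPC[host-b:50052]",
        };
        std::vector<std::string> devices;
        std::vector<std::string> rpc_servers;
        for (const auto & d : discovered) {
            if (d.rfind("RPC", 0) == 0) {       // name starts with "RPC"
                rpc_servers.push_back(d);
            } else {
                devices.push_back(d);
            }
        }
        // add RPC servers at the front of the list, as in the patch
        if (!rpc_servers.empty()) {
            devices.insert(devices.begin(), rpc_servers.begin(), rpc_servers.end());
        }
        for (const auto & d : devices) {
            std::printf("%s\n", d.c_str());     // RPC[host-a], RPC[host-b], CUDA0, CUDA1
        }
        return 0;
    }

Note that the splice preserves the relative order of the RPC servers themselves, so multiple --rpc endpoints keep the order in which they were registered.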