const int act_gpu_layers = devices.empty() ? 0 : std::min(n_gpu_layers, (int)n_layer + 1); // +1: the output layer can be offloaded too
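// choose the device and buffer-type list for layer il: layers outside the
// offload window [i_gpu_start, i_gpu_start + act_gpu_layers) stay on the CPU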
auto get_layer_buft_list = [&](int il) -> llama_model::impl::layer_dev {
if (il < i_gpu_start || (il - i_gpu_start) >= act_gpu_layers) {
+ LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s\n", il, ggml_backend_dev_name(cpu_dev));
return {cpu_dev, &pimpl->cpu_buft_list};
}
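    // splits[] holds the cumulative fraction of offloaded layers per device
    // (ending at 1.0f); pick the first device whose cumulative share exceeds
    // this layer's normalized position within the offloaded range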
const int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + n_devices(), float(il - i_gpu_start)/act_gpu_layers) - splits.begin();
auto * dev = devices.at(layer_gpu);
+ LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s\n", il, ggml_backend_dev_name(dev));
return {dev, &pimpl->gpu_buft_list.at(dev)};
};
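// worked example (hypothetical values: two devices with an even tensor split,
// so splits = {0.5f, 1.0f}, i_gpu_start = 0, act_gpu_layers = 32):
//   il = 10 -> 10/32 = 0.3125 -> upper_bound yields index 0 -> devices[0]
//   il = 20 -> 20/32 = 0.6250 -> upper_bound yields index 1 -> devices[1]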
// create list of devices to use with this model
if (params.devices) {
    for (ggml_backend_dev_t * dev = params.devices; *dev; ++dev) {
        model->devices.push_back(*dev);
}
} else {
+ std::vector<ggml_backend_dev_t> rpc_servers;
// use all available devices
for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
ggml_backend_dev_t dev = ggml_backend_dev_get(i);
        switch (ggml_backend_dev_type(dev)) {
            case GGML_BACKEND_DEVICE_TYPE_CPU:
            case GGML_BACKEND_DEVICE_TYPE_ACCEL:
                // skip CPU backends since they are handled separately
                break;

case GGML_BACKEND_DEVICE_TYPE_GPU:
- model->devices.push_back(dev);
+ ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
+ if (ggml_backend_reg_name(reg) == std::string("RPC")) {
+ rpc_servers.push_back(dev);
+ } else {
+ model->devices.push_back(dev);
+ }
break;
}
}
+ // add RPC servers at the front of the list so that remote devices take
+ // precedence over local GPUs when layers are assigned
+ if (!rpc_servers.empty()) {
+ model->devices.insert(model->devices.begin(), rpc_servers.begin(), rpc_servers.end());
+ }
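+ // e.g. with two RPC servers and one local GPU registered, the resulting
+ // order is { rpc0, rpc1, gpu0 } (hypothetical names), so the RPC devices
+ // receive the first layer ranges when the model is offloaded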
}
// if using single GPU mode, remove all except the main GPU