}
// CPU: ACCEL -> GPU host -> CPU extra -> CPU
-static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices, bool use_extra_bufts) {
+static buft_list_t make_cpu_buft_list(const std::vector<ggml_backend_dev_t> & devices, bool use_extra_bufts, bool no_host) {
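+ // no_host: skip the pinned host buffer types below so weights stay in regular CPU memory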
buft_list_t buft_list;
// add a host buffer type
// generally, this will be done using the first device in the list
// a better approach would be to handle this on a weight-by-weight basis using the offload_op
// function of the device to determine if it would benefit from being stored in a host buffer
- for (auto * dev : devices) {
- ggml_backend_buffer_type_t buft = ggml_backend_dev_host_buffer_type(dev);
- if (buft) {
- buft_list.emplace_back(dev, buft);
- break;
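+ // honor --no-host: without a host buffer type here, the plain CPU buffer types added later are used instead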
+ if (!no_host) {
+ for (auto * dev : devices) {
+ ggml_backend_buffer_type_t buft = ggml_backend_dev_host_buffer_type(dev);
+ if (buft) {
+ buft_list.emplace_back(dev, buft);
+ break;
+ }
}
}
LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, ml.use_mmap ? "true" : "false");
// build a list of buffer types for the CPU and GPU devices
- pimpl->cpu_buft_list = make_cpu_buft_list(devices, params.use_extra_bufts);
+ pimpl->cpu_buft_list = make_cpu_buft_list(devices, params.use_extra_bufts, params.no_host);
for (auto * dev : devices) {
buft_list_t buft_list = make_gpu_buft_list(dev, split_mode, tensor_split);
// add CPU buffer types as a fallback
/*.use_mlock =*/ false,
/*.check_tensors =*/ false,
/*.use_extra_bufts =*/ true,
+ /*.no_host =*/ false,
};
return result;
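// usage sketch (hypothetical caller; only the field name comes from this diff):
//   llama_model_params mp = llama_model_default_params();
//   mp.no_host = true; // opt out of pinned host buffers at load time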
std::vector<bool> use_mmap;
std::vector<bool> embeddings;
std::vector<bool> no_op_offload;
+ std::vector<bool> no_host;
ggml_numa_strategy numa;
int reps;
ggml_sched_priority prio;
/* use_mmap */ { true },
/* embeddings */ { false },
/* no_op_offload */ { false },
+ /* no_host */ { false },
/* numa */ GGML_NUMA_STRATEGY_DISABLED,
/* reps */ 5,
/* prio */ GGML_SCHED_PRIO_NORMAL,
printf(" -ot --override-tensor <tensor name pattern>=<buffer type>;...\n");
printf(" (default: disabled)\n");
printf(" -nopo, --no-op-offload <0|1> (default: 0)\n");
+ printf(" --no-host <0|1> (default: %s)\n",
+ join(cmd_params_defaults.no_host, ",").c_str());
printf("\n");
printf(
"Multiple values can be given for each parameter by separating them with ','\n"
}
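// usage sketch (hypothetical invocation): like the other boolean flags,
// --no-host accepts comma-separated values, e.g.
//   llama-bench -m model.gguf --no-host 0,1
// which benchmarks every test with and without pinned host buffers in one run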
auto p = string_split<bool>(argv[i], split_delim);
params.no_op_offload.insert(params.no_op_offload.end(), p.begin(), p.end());
+ } else if (arg == "--no-host") {
+ if (++i >= argc) {
+ invalid_param = true;
+ break;
+ }
+ auto p = string_split<bool>(argv[i], split_delim);
+ params.no_host.insert(params.no_host.end(), p.begin(), p.end());
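+ // comma-separated 0/1 values expand into one test instance per value via the cross-product loops below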
} else if (arg == "-ts" || arg == "--tensor-split") {
if (++i >= argc) {
invalid_param = true;
if (params.no_op_offload.empty()) {
params.no_op_offload = cmd_params_defaults.no_op_offload;
}
+ if (params.no_host.empty()) {
+ params.no_host = cmd_params_defaults.no_host;
+ }
if (params.n_threads.empty()) {
params.n_threads = cmd_params_defaults.n_threads;
}
bool use_mmap;
bool embeddings;
bool no_op_offload;
+ bool no_host;
llama_model_params to_llama_mparams() const {
llama_model_params mparams = llama_model_default_params();
mparams.main_gpu = main_gpu;
mparams.tensor_split = tensor_split.data();
mparams.use_mmap = use_mmap;
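+ // carry the instance's no_host setting into the model params used for loading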
+ mparams.no_host = no_host;
if (n_cpu_moe <= 0) {
if (tensor_buft_overrides.empty()) {
split_mode == other.split_mode &&
main_gpu == other.main_gpu && use_mmap == other.use_mmap && tensor_split == other.tensor_split &&
devices == other.devices &&
+ no_host == other.no_host &&
vec_tensor_buft_override_equal(tensor_buft_overrides, other.tensor_buft_overrides);
}
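// llama-bench reuses the loaded model across consecutive instances whose model
// params compare equal, so no_host must take part in this comparison to force a
// reload when the flag changes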
for (const auto & ts : params.tensor_split)
for (const auto & ot : params.tensor_buft_overrides)
for (const auto & mmp : params.use_mmap)
+ for (const auto & noh : params.no_host)
for (const auto & embd : params.embeddings)
for (const auto & nopo : params.no_op_offload)
for (const auto & nb : params.n_batch)
/* .use_mmap = */ mmp,
/* .embeddings = */ embd,
/* .no_op_offload= */ nopo,
+ /* .no_host = */ noh,
};
instances.push_back(instance);
}
/* .use_mmap = */ mmp,
/* .embeddings = */ embd,
/* .no_op_offload= */ nopo,
+ /* .no_host = */ noh,
};
instances.push_back(instance);
}
/* .use_mmap = */ mmp,
/* .embeddings = */ embd,
/* .no_op_offload= */ nopo,
+ /* .no_host = */ noh,
};
instances.push_back(instance);
}
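// the same initializer field is added in each of the three instance-creation
// blocks (prompt, generation, and prompt+generation tests)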
bool use_mmap;
bool embeddings;
bool no_op_offload;
+ bool no_host;
int n_prompt;
int n_gen;
int n_depth;
use_mmap = inst.use_mmap;
embeddings = inst.embeddings;
no_op_offload = inst.no_op_offload;
+ no_host = inst.no_host;
n_prompt = inst.n_prompt;
n_gen = inst.n_gen;
n_depth = inst.n_depth;
"type_k", "type_v", "n_gpu_layers", "n_cpu_moe", "split_mode",
"main_gpu", "no_kv_offload", "flash_attn", "devices", "tensor_split",
"tensor_buft_overrides", "use_mmap", "embeddings", "no_op_offload",
- "n_prompt", "n_gen", "n_depth", "test_time", "avg_ns",
- "stddev_ns", "avg_ts", "stddev_ts"
+ "no_host", "n_prompt", "n_gen", "n_depth", "test_time",
+ "avg_ns", "stddev_ns", "avg_ts", "stddev_ts"
};
return fields;
}
return INT;
}
if (field == "f16_kv" || field == "no_kv_offload" || field == "cpu_strict" || field == "flash_attn" ||
- field == "use_mmap" || field == "embeddings") {
+ field == "use_mmap" || field == "embeddings" || field == "no_host") {
return BOOL;
}
if (field == "avg_ts" || field == "stddev_ts") {
std::to_string(use_mmap),
std::to_string(embeddings),
std::to_string(no_op_offload),
+ std::to_string(no_host),
std::to_string(n_prompt),
std::to_string(n_gen),
std::to_string(n_depth),
if (field == "no_op_offload") {
return 4;
}
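+ // same narrow column as no_op_offload: the "noh" header plus 0/1 values fit in 4 characters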
+ if (field == "no_host") {
+ return 4;
+ }
int width = std::max((int) field.length(), 10);
if (field == "no_op_offload") {
return "nopo";
}
+ if (field == "no_host") {
+ return "noh";
+ }
if (field == "devices") {
return "dev";
}
if (params.no_op_offload.size() > 1 || params.no_op_offload != cmd_params_defaults.no_op_offload) {
fields.emplace_back("no_op_offload");
}
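+ // emit the column only when the user varied --no-host or overrode its default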
+ if (params.no_host.size() > 1 || params.no_host != cmd_params_defaults.no_host) {
+ fields.emplace_back("no_host");
+ }
fields.emplace_back("test");
fields.emplace_back("t/s");