#include <clocale>
#include <cmath>
#include <cstdio>
+#include <cstdlib>
#include <cstring>
#include <ctime>
-#include <cstdlib>
#include <iterator>
#include <map>
#include <numeric>
#include <regex>
#include <sstream>
#include <string>
-#include <vector>
#include <thread>
+#include <vector>
+#include "common.h"
#include "ggml.h"
#include "llama.h"
-#include "common.h"
#ifdef _WIN32
-#define WIN32_LEAN_AND_MEAN
-#ifndef NOMINMAX
-# define NOMINMAX
-#endif
-#include <windows.h>
+#    define WIN32_LEAN_AND_MEAN
+#    ifndef NOMINMAX
+#        define NOMINMAX
+#    endif
+#    include <windows.h>
#endif
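// note: WIN32_LEAN_AND_MEAN trims rarely used APIs from <windows.h>, and
// NOMINMAX keeps it from defining min()/max() macros that would otherwise
// clash with std::min/std::max.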
// utils
return std::chrono::nanoseconds(clock::now().time_since_epoch()).count();
}
-template<class T>
-static std::string join(const std::vector<T> & values, const std::string & delim) {
+template <class T> static std::string join(const std::vector<T> & values, const std::string & delim) {
std::ostringstream str;
for (size_t i = 0; i < values.size(); i++) {
str << values[i];
return str.str();
}
-template<typename T, typename F>
-static std::vector<std::string> transform_to_str(const std::vector<T> & values, F f) {
+template <typename T, typename F> static std::vector<std::string> transform_to_str(const std::vector<T> & values, F f) {
std::vector<std::string> str_values;
std::transform(values.begin(), values.end(), std::back_inserter(str_values), f);
return str_values;
}
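// transform_to_str() stringifies enum-valued parameter lists, e.g.
//   join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",")
// as used in the usage text below.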
-template<typename T>
-static T avg(const std::vector<T> & v) {
+template <typename T> static T avg(const std::vector<T> & v) {
if (v.empty()) {
return 0;
}
T sum = std::accumulate(v.begin(), v.end(), T(0));
- return sum / (T)v.size();
+ return sum / (T) v.size();
}
-template<typename T>
-static T stdev(const std::vector<T> & v) {
+template <typename T> static T stdev(const std::vector<T> & v) {
if (v.size() <= 1) {
return 0;
}
- T mean = avg(v);
+ T mean = avg(v);
T sq_sum = std::inner_product(v.begin(), v.end(), v.begin(), T(0));
- T stdev = std::sqrt(sq_sum / (T)(v.size() - 1) - mean * mean * (T)v.size() / (T)(v.size() - 1));
+ T stdev = std::sqrt(sq_sum / (T) (v.size() - 1) - mean * mean * (T) v.size() / (T) (v.size() - 1));
return stdev;
}
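// note: stdev() uses the one-pass identity
//   s^2 = (sum(x_i^2) - n * mean^2) / (n - 1),
// equal to the usual sample variance; e.g. stdev({10, 12, 14}) == 2
// (sq_sum = 440, mean = 12: (440 - 3 * 144) / 2 = 4, sqrt(4) = 2).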
static std::string get_cpu_info() {
std::vector<std::string> cpu_list;
for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
- auto * dev = ggml_backend_dev_get(i);
- auto dev_type = ggml_backend_dev_type(dev);
+ auto * dev = ggml_backend_dev_get(i);
+ auto dev_type = ggml_backend_dev_type(dev);
if (dev_type == GGML_BACKEND_DEVICE_TYPE_CPU || dev_type == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
cpu_list.push_back(ggml_backend_dev_description(dev));
}
static std::string get_gpu_info() {
std::vector<std::string> gpu_list;
for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
- auto * dev = ggml_backend_dev_get(i);
- auto dev_type = ggml_backend_dev_type(dev);
+ auto * dev = ggml_backend_dev_get(i);
+ auto dev_type = ggml_backend_dev_type(dev);
if (dev_type == GGML_BACKEND_DEVICE_TYPE_GPU) {
gpu_list.push_back(ggml_backend_dev_description(dev));
}
}
// command line params
-enum output_formats {NONE, CSV, JSON, JSONL, MARKDOWN, SQL};
+enum output_formats { NONE, CSV, JSON, JSONL, MARKDOWN, SQL };
static const char * output_format_str(output_formats format) {
switch (format) {
- case NONE: return "none";
- case CSV: return "csv";
- case JSON: return "json";
- case JSONL: return "jsonl";
- case MARKDOWN: return "md";
- case SQL: return "sql";
- default: GGML_ABORT("invalid output format");
+ case NONE:
+ return "none";
+ case CSV:
+ return "csv";
+ case JSON:
+ return "json";
+ case JSONL:
+ return "jsonl";
+ case MARKDOWN:
+ return "md";
+ case SQL:
+ return "sql";
+ default:
+ GGML_ABORT("invalid output format");
}
}
static const char * split_mode_str(llama_split_mode mode) {
switch (mode) {
- case LLAMA_SPLIT_MODE_NONE: return "none";
- case LLAMA_SPLIT_MODE_LAYER: return "layer";
- case LLAMA_SPLIT_MODE_ROW: return "row";
- default: GGML_ABORT("invalid split mode");
+ case LLAMA_SPLIT_MODE_NONE:
+ return "none";
+ case LLAMA_SPLIT_MODE_LAYER:
+ return "layer";
+ case LLAMA_SPLIT_MODE_ROW:
+ return "row";
+ default:
+ GGML_ABORT("invalid split mode");
}
}
}
struct cmd_params {
- std::vector<std::string> model;
- std::vector<int> n_prompt;
- std::vector<int> n_gen;
+ std::vector<std::string> model;
+ std::vector<int> n_prompt;
+ std::vector<int> n_gen;
std::vector<std::pair<int, int>> n_pg;
- std::vector<int> n_batch;
- std::vector<int> n_ubatch;
- std::vector<ggml_type> type_k;
- std::vector<ggml_type> type_v;
- std::vector<int> n_threads;
- std::vector<std::string> cpu_mask;
- std::vector<bool> cpu_strict;
- std::vector<int> poll;
- std::vector<int> n_gpu_layers;
- std::vector<std::string> rpc_servers;
- std::vector<llama_split_mode> split_mode;
- std::vector<int> main_gpu;
- std::vector<bool> no_kv_offload;
- std::vector<bool> flash_attn;
- std::vector<std::vector<float>> tensor_split;
- std::vector<bool> use_mmap;
- std::vector<bool> embeddings;
- ggml_numa_strategy numa;
- int reps;
- ggml_sched_priority prio;
- int delay;
- bool verbose;
- bool progress;
- output_formats output_format;
- output_formats output_format_stderr;
+ std::vector<int> n_batch;
+ std::vector<int> n_ubatch;
+ std::vector<ggml_type> type_k;
+ std::vector<ggml_type> type_v;
+ std::vector<int> n_threads;
+ std::vector<std::string> cpu_mask;
+ std::vector<bool> cpu_strict;
+ std::vector<int> poll;
+ std::vector<int> n_gpu_layers;
+ std::vector<std::string> rpc_servers;
+ std::vector<llama_split_mode> split_mode;
+ std::vector<int> main_gpu;
+ std::vector<bool> no_kv_offload;
+ std::vector<bool> flash_attn;
+ std::vector<std::vector<float>> tensor_split;
+ std::vector<bool> use_mmap;
+ std::vector<bool> embeddings;
+ ggml_numa_strategy numa;
+ int reps;
+ ggml_sched_priority prio;
+ int delay;
+ bool verbose;
+ bool progress;
+ output_formats output_format;
+ output_formats output_format_stderr;
};
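// each vector member holds every value requested on the command line for that
// parameter; get_cmd_params_instances() later expands the cross product of all
// of them into individual benchmark runs.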
static const cmd_params cmd_params_defaults = {
- /* model */ {"models/7B/ggml-model-q4_0.gguf"},
- /* n_prompt */ {512},
- /* n_gen */ {128},
+ /* model */ { "models/7B/ggml-model-q4_0.gguf" },
+ /* n_prompt */ { 512 },
+ /* n_gen */ { 128 },
/* n_pg */ {},
- /* n_batch */ {2048},
- /* n_ubatch */ {512},
- /* type_k */ {GGML_TYPE_F16},
- /* type_v */ {GGML_TYPE_F16},
- /* n_threads */ {cpu_get_num_math()},
- /* cpu_mask */ {"0x0"},
- /* cpu_strict */ {false},
- /* poll */ {50},
- /* n_gpu_layers */ {99},
- /* rpc_servers */ {""},
- /* split_mode */ {LLAMA_SPLIT_MODE_LAYER},
- /* main_gpu */ {0},
- /* no_kv_offload */ {false},
- /* flash_attn */ {false},
- /* tensor_split */ {std::vector<float>(llama_max_devices(), 0.0f)},
- /* use_mmap */ {true},
- /* embeddings */ {false},
+ /* n_batch */ { 2048 },
+ /* n_ubatch */ { 512 },
+ /* type_k */ { GGML_TYPE_F16 },
+ /* type_v */ { GGML_TYPE_F16 },
+ /* n_threads */ { cpu_get_num_math() },
+ /* cpu_mask */ { "0x0" },
+ /* cpu_strict */ { false },
+ /* poll */ { 50 },
+ /* n_gpu_layers */ { 99 },
+ /* rpc_servers */ { "" },
+ /* split_mode */ { LLAMA_SPLIT_MODE_LAYER },
+ /* main_gpu */ { 0 },
+ /* no_kv_offload */ { false },
+ /* flash_attn */ { false },
+ /* tensor_split */ { std::vector<float>(llama_max_devices(), 0.0f) },
+ /* use_mmap */ { true },
+ /* embeddings */ { false },
/* numa */ GGML_NUMA_STRATEGY_DISABLED,
/* reps */ 5,
/* prio */ GGML_SCHED_PRIO_NORMAL,
printf("options:\n");
printf(" -h, --help\n");
printf(" -m, --model <filename> (default: %s)\n", join(cmd_params_defaults.model, ",").c_str());
- printf(" -p, --n-prompt <n> (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str());
+ printf(" -p, --n-prompt <n> (default: %s)\n",
+ join(cmd_params_defaults.n_prompt, ",").c_str());
printf(" -n, --n-gen <n> (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str());
- printf(" -pg <pp,tg> (default: %s)\n", join(transform_to_str(cmd_params_defaults.n_pg, pair_str), ",").c_str());
- printf(" -b, --batch-size <n> (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str());
- printf(" -ub, --ubatch-size <n> (default: %s)\n", join(cmd_params_defaults.n_ubatch, ",").c_str());
- printf(" -ctk, --cache-type-k <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str());
- printf(" -ctv, --cache-type-v <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
- printf(" -t, --threads <n> (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str());
- printf(" -C, --cpu-mask <hex,hex> (default: %s)\n", join(cmd_params_defaults.cpu_mask, ",").c_str());
- printf(" --cpu-strict <0|1> (default: %s)\n", join(cmd_params_defaults.cpu_strict, ",").c_str());
+ printf(" -pg <pp,tg> (default: %s)\n",
+ join(transform_to_str(cmd_params_defaults.n_pg, pair_str), ",").c_str());
+ printf(" -b, --batch-size <n> (default: %s)\n",
+ join(cmd_params_defaults.n_batch, ",").c_str());
+ printf(" -ub, --ubatch-size <n> (default: %s)\n",
+ join(cmd_params_defaults.n_ubatch, ",").c_str());
+ printf(" -ctk, --cache-type-k <t> (default: %s)\n",
+ join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str());
+ printf(" -ctv, --cache-type-v <t> (default: %s)\n",
+ join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
+ printf(" -t, --threads <n> (default: %s)\n",
+ join(cmd_params_defaults.n_threads, ",").c_str());
+ printf(" -C, --cpu-mask <hex,hex> (default: %s)\n",
+ join(cmd_params_defaults.cpu_mask, ",").c_str());
+ printf(" --cpu-strict <0|1> (default: %s)\n",
+ join(cmd_params_defaults.cpu_strict, ",").c_str());
printf(" --poll <0...100> (default: %s)\n", join(cmd_params_defaults.poll, ",").c_str());
- printf(" -ngl, --n-gpu-layers <n> (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str());
+ printf(" -ngl, --n-gpu-layers <n> (default: %s)\n",
+ join(cmd_params_defaults.n_gpu_layers, ",").c_str());
if (llama_supports_rpc()) {
- printf(" -rpc, --rpc <rpc_servers> (default: %s)\n", join(cmd_params_defaults.rpc_servers, ",").c_str());
+ printf(" -rpc, --rpc <rpc_servers> (default: %s)\n",
+ join(cmd_params_defaults.rpc_servers, ",").c_str());
}
- printf(" -sm, --split-mode <none|layer|row> (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str());
- printf(" -mg, --main-gpu <i> (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
- printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
- printf(" -fa, --flash-attn <0|1> (default: %s)\n", join(cmd_params_defaults.flash_attn, ",").c_str());
- printf(" -mmp, --mmap <0|1> (default: %s)\n", join(cmd_params_defaults.use_mmap, ",").c_str());
+ printf(" -sm, --split-mode <none|layer|row> (default: %s)\n",
+ join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str());
+ printf(" -mg, --main-gpu <i> (default: %s)\n",
+ join(cmd_params_defaults.main_gpu, ",").c_str());
+ printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n",
+ join(cmd_params_defaults.no_kv_offload, ",").c_str());
+ printf(" -fa, --flash-attn <0|1> (default: %s)\n",
+ join(cmd_params_defaults.flash_attn, ",").c_str());
+ printf(" -mmp, --mmap <0|1> (default: %s)\n",
+ join(cmd_params_defaults.use_mmap, ",").c_str());
printf(" --numa <distribute|isolate|numactl> (default: disabled)\n");
- printf(" -embd, --embeddings <0|1> (default: %s)\n", join(cmd_params_defaults.embeddings, ",").c_str());
+ printf(" -embd, --embeddings <0|1> (default: %s)\n",
+ join(cmd_params_defaults.embeddings, ",").c_str());
printf(" -ts, --tensor-split <ts0/ts1/..> (default: 0)\n");
printf(" -r, --repetitions <n> (default: %d)\n", cmd_params_defaults.reps);
printf(" --prio <0|1|2|3> (default: %d)\n", cmd_params_defaults.prio);
printf(" --delay <0...N> (seconds) (default: %d)\n", cmd_params_defaults.delay);
- printf(" -o, --output <csv|json|jsonl|md|sql> (default: %s)\n", output_format_str(cmd_params_defaults.output_format));
- printf(" -oe, --output-err <csv|json|jsonl|md|sql> (default: %s)\n", output_format_str(cmd_params_defaults.output_format_stderr));
+ printf(" -o, --output <csv|json|jsonl|md|sql> (default: %s)\n",
+ output_format_str(cmd_params_defaults.output_format));
+ printf(" -oe, --output-err <csv|json|jsonl|md|sql> (default: %s)\n",
+ output_format_str(cmd_params_defaults.output_format_stderr));
printf(" -v, --verbose (default: %s)\n", cmd_params_defaults.verbose ? "1" : "0");
printf(" --progress (default: %s)\n", cmd_params_defaults.progress ? "1" : "0");
printf("\n");
- printf("Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.\n");
+ printf(
+ "Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter "
+ "multiple times.\n");
}
static ggml_type ggml_type_from_name(const std::string & s) {
return GGML_TYPE_COUNT;
}
-
static cmd_params parse_cmd_params(int argc, char ** argv) {
- cmd_params params;
- std::string arg;
- bool invalid_param = false;
- const std::string arg_prefix = "--";
- const char split_delim = ',';
-
- params.verbose = cmd_params_defaults.verbose;
- params.output_format = cmd_params_defaults.output_format;
+ cmd_params params;
+ std::string arg;
+ bool invalid_param = false;
+ const std::string arg_prefix = "--";
+ const char split_delim = ',';
+
+ params.verbose = cmd_params_defaults.verbose;
+ params.output_format = cmd_params_defaults.output_format;
params.output_format_stderr = cmd_params_defaults.output_format_stderr;
- params.reps = cmd_params_defaults.reps;
- params.numa = cmd_params_defaults.numa;
- params.prio = cmd_params_defaults.prio;
- params.delay = cmd_params_defaults.delay;
- params.progress = cmd_params_defaults.progress;
+ params.reps = cmd_params_defaults.reps;
+ params.numa = cmd_params_defaults.numa;
+ params.prio = cmd_params_defaults.prio;
+ params.delay = cmd_params_defaults.delay;
+ params.progress = cmd_params_defaults.progress;
for (int i = 1; i < argc; i++) {
arg = argv[i];
invalid_param = true;
break;
}
- params.n_pg.push_back({std::stoi(p[0]), std::stoi(p[1])});
+ params.n_pg.push_back({ std::stoi(p[0]), std::stoi(p[1]) });
} else if (arg == "-b" || arg == "--batch-size") {
if (++i >= argc) {
invalid_param = true;
invalid_param = true;
break;
}
- auto p = string_split<std::string>(argv[i], split_delim);
+ auto p = string_split<std::string>(argv[i], split_delim);
std::vector<ggml_type> types;
for (const auto & t : p) {
ggml_type gt = ggml_type_from_name(t);
invalid_param = true;
break;
}
- auto p = string_split<std::string>(argv[i], split_delim);
+ auto p = string_split<std::string>(argv[i], split_delim);
std::vector<ggml_type> types;
for (const auto & t : p) {
ggml_type gt = ggml_type_from_name(t);
invalid_param = true;
break;
}
- auto p = string_split<std::string>(argv[i], split_delim);
+ auto p = string_split<std::string>(argv[i], split_delim);
std::vector<llama_split_mode> modes;
for (const auto & m : p) {
llama_split_mode mode;
break;
} else {
std::string value(argv[i]);
- /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
- else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
- else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
- else { invalid_param = true; break; }
+ /**/ if (value == "distribute" || value == "") {
+ params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE;
+ } else if (value == "isolate") {
+ params.numa = GGML_NUMA_STRATEGY_ISOLATE;
+ } else if (value == "numactl") {
+ params.numa = GGML_NUMA_STRATEGY_NUMACTL;
+ } else {
+ invalid_param = true;
+ break;
+ }
}
} else if (arg == "-fa" || arg == "--flash-attn") {
if (++i >= argc) {
}
for (auto ts : string_split<std::string>(argv[i], split_delim)) {
// split string by ; and /
- const std::regex regex{R"([;/]+)"};
- std::sregex_token_iterator it{ts.begin(), ts.end(), regex, -1};
- std::vector<std::string> split_arg{it, {}};
+ const std::regex regex{ R"([;/]+)" };
+ std::sregex_token_iterator it{ ts.begin(), ts.end(), regex, -1 };
+ std::vector<std::string> split_arg{ it, {} };
GGML_ASSERT(split_arg.size() <= llama_max_devices());
std::vector<float> tensor_split(llama_max_devices());
}
// set defaults
- if (params.model.empty()) { params.model = cmd_params_defaults.model; }
- if (params.n_prompt.empty()) { params.n_prompt = cmd_params_defaults.n_prompt; }
- if (params.n_gen.empty()) { params.n_gen = cmd_params_defaults.n_gen; }
- if (params.n_pg.empty()) { params.n_pg = cmd_params_defaults.n_pg; }
- if (params.n_batch.empty()) { params.n_batch = cmd_params_defaults.n_batch; }
- if (params.n_ubatch.empty()) { params.n_ubatch = cmd_params_defaults.n_ubatch; }
- if (params.type_k.empty()) { params.type_k = cmd_params_defaults.type_k; }
- if (params.type_v.empty()) { params.type_v = cmd_params_defaults.type_v; }
- if (params.n_gpu_layers.empty()) { params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; }
- if (params.rpc_servers.empty()) { params.rpc_servers = cmd_params_defaults.rpc_servers; }
- if (params.split_mode.empty()) { params.split_mode = cmd_params_defaults.split_mode; }
- if (params.main_gpu.empty()) { params.main_gpu = cmd_params_defaults.main_gpu; }
- if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; }
- if (params.flash_attn.empty()) { params.flash_attn = cmd_params_defaults.flash_attn; }
- if (params.tensor_split.empty()) { params.tensor_split = cmd_params_defaults.tensor_split; }
- if (params.use_mmap.empty()) { params.use_mmap = cmd_params_defaults.use_mmap; }
- if (params.embeddings.empty()) { params.embeddings = cmd_params_defaults.embeddings; }
- if (params.n_threads.empty()) { params.n_threads = cmd_params_defaults.n_threads; }
- if (params.cpu_mask.empty()) { params.cpu_mask = cmd_params_defaults.cpu_mask; }
- if (params.cpu_strict.empty()) { params.cpu_strict = cmd_params_defaults.cpu_strict; }
- if (params.poll.empty()) { params.poll = cmd_params_defaults.poll; }
+ if (params.model.empty()) {
+ params.model = cmd_params_defaults.model;
+ }
+ if (params.n_prompt.empty()) {
+ params.n_prompt = cmd_params_defaults.n_prompt;
+ }
+ if (params.n_gen.empty()) {
+ params.n_gen = cmd_params_defaults.n_gen;
+ }
+ if (params.n_pg.empty()) {
+ params.n_pg = cmd_params_defaults.n_pg;
+ }
+ if (params.n_batch.empty()) {
+ params.n_batch = cmd_params_defaults.n_batch;
+ }
+ if (params.n_ubatch.empty()) {
+ params.n_ubatch = cmd_params_defaults.n_ubatch;
+ }
+ if (params.type_k.empty()) {
+ params.type_k = cmd_params_defaults.type_k;
+ }
+ if (params.type_v.empty()) {
+ params.type_v = cmd_params_defaults.type_v;
+ }
+ if (params.n_gpu_layers.empty()) {
+ params.n_gpu_layers = cmd_params_defaults.n_gpu_layers;
+ }
+ if (params.rpc_servers.empty()) {
+ params.rpc_servers = cmd_params_defaults.rpc_servers;
+ }
+ if (params.split_mode.empty()) {
+ params.split_mode = cmd_params_defaults.split_mode;
+ }
+ if (params.main_gpu.empty()) {
+ params.main_gpu = cmd_params_defaults.main_gpu;
+ }
+ if (params.no_kv_offload.empty()) {
+ params.no_kv_offload = cmd_params_defaults.no_kv_offload;
+ }
+ if (params.flash_attn.empty()) {
+ params.flash_attn = cmd_params_defaults.flash_attn;
+ }
+ if (params.tensor_split.empty()) {
+ params.tensor_split = cmd_params_defaults.tensor_split;
+ }
+ if (params.use_mmap.empty()) {
+ params.use_mmap = cmd_params_defaults.use_mmap;
+ }
+ if (params.embeddings.empty()) {
+ params.embeddings = cmd_params_defaults.embeddings;
+ }
+ if (params.n_threads.empty()) {
+ params.n_threads = cmd_params_defaults.n_threads;
+ }
+ if (params.cpu_mask.empty()) {
+ params.cpu_mask = cmd_params_defaults.cpu_mask;
+ }
+ if (params.cpu_strict.empty()) {
+ params.cpu_strict = cmd_params_defaults.cpu_strict;
+ }
+ if (params.poll.empty()) {
+ params.poll = cmd_params_defaults.poll;
+ }
return params;
}
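// any parameter left unset on the command line falls back to cmd_params_defaults
// above, so a bare `llama-bench` invocation still runs a complete
// 512-token-prompt / 128-token-generation test.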
struct cmd_params_instance {
- std::string model;
- int n_prompt;
- int n_gen;
- int n_batch;
- int n_ubatch;
- ggml_type type_k;
- ggml_type type_v;
- int n_threads;
- std::string cpu_mask;
- bool cpu_strict;
- int poll;
- int n_gpu_layers;
- std::string rpc_servers;
- llama_split_mode split_mode;
- int main_gpu;
- bool no_kv_offload;
- bool flash_attn;
+ std::string model;
+ int n_prompt;
+ int n_gen;
+ int n_batch;
+ int n_ubatch;
+ ggml_type type_k;
+ ggml_type type_v;
+ int n_threads;
+ std::string cpu_mask;
+ bool cpu_strict;
+ int poll;
+ int n_gpu_layers;
+ std::string rpc_servers;
+ llama_split_mode split_mode;
+ int main_gpu;
+ bool no_kv_offload;
+ bool flash_attn;
std::vector<float> tensor_split;
- bool use_mmap;
- bool embeddings;
+ bool use_mmap;
+ bool embeddings;
llama_model_params to_llama_mparams() const {
llama_model_params mparams = llama_model_default_params();
if (!rpc_servers.empty()) {
mparams.rpc_servers = rpc_servers.c_str();
}
- mparams.split_mode = split_mode;
- mparams.main_gpu = main_gpu;
+ mparams.split_mode = split_mode;
+ mparams.main_gpu = main_gpu;
mparams.tensor_split = tensor_split.data();
- mparams.use_mmap = use_mmap;
+ mparams.use_mmap = use_mmap;
return mparams;
}
bool equal_mparams(const cmd_params_instance & other) const {
- return model == other.model &&
- n_gpu_layers == other.n_gpu_layers &&
- rpc_servers == other.rpc_servers &&
- split_mode == other.split_mode &&
- main_gpu == other.main_gpu &&
- use_mmap == other.use_mmap &&
+ return model == other.model && n_gpu_layers == other.n_gpu_layers && rpc_servers == other.rpc_servers &&
+ split_mode == other.split_mode && main_gpu == other.main_gpu && use_mmap == other.use_mmap &&
tensor_split == other.tensor_split;
}
llama_context_params to_llama_cparams() const {
llama_context_params cparams = llama_context_default_params();
- cparams.n_ctx = n_prompt + n_gen;
- cparams.n_batch = n_batch;
- cparams.n_ubatch = n_ubatch;
- cparams.type_k = type_k;
- cparams.type_v = type_v;
+ cparams.n_ctx = n_prompt + n_gen;
+ cparams.n_batch = n_batch;
+ cparams.n_ubatch = n_ubatch;
+ cparams.type_k = type_k;
+ cparams.type_v = type_v;
cparams.offload_kqv = !no_kv_offload;
- cparams.flash_attn = flash_attn;
- cparams.embeddings = embeddings;
+ cparams.flash_attn = flash_attn;
+ cparams.embeddings = embeddings;
return cparams;
}
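    // the context is sized to exactly n_prompt + n_gen tokens, so a run never
    // allocates more KV cache than the requested workload needs.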
std::vector<cmd_params_instance> instances;
// this ordering minimizes the number of times that each model needs to be reloaded
+ // clang-format off
for (const auto & m : params.model)
for (const auto & nl : params.n_gpu_layers)
for (const auto & rpc : params.rpc_servers)
instances.push_back(instance);
}
}
+ // clang-format on
return instances;
}
struct test {
static const std::string build_commit;
- static const int build_number;
+ static const int build_number;
static const std::string cpu_info;
static const std::string gpu_info;
- std::string model_filename;
- std::string model_type;
- uint64_t model_size;
- uint64_t model_n_params;
- int n_batch;
- int n_ubatch;
- int n_threads;
- std::string cpu_mask;
- bool cpu_strict;
- int poll;
- ggml_type type_k;
- ggml_type type_v;
- int n_gpu_layers;
- llama_split_mode split_mode;
- int main_gpu;
- bool no_kv_offload;
- bool flash_attn;
- std::vector<float> tensor_split;
- bool use_mmap;
- bool embeddings;
- int n_prompt;
- int n_gen;
- std::string test_time;
- std::vector<uint64_t> samples_ns;
+ std::string model_filename;
+ std::string model_type;
+ uint64_t model_size;
+ uint64_t model_n_params;
+ int n_batch;
+ int n_ubatch;
+ int n_threads;
+ std::string cpu_mask;
+ bool cpu_strict;
+ int poll;
+ ggml_type type_k;
+ ggml_type type_v;
+ int n_gpu_layers;
+ llama_split_mode split_mode;
+ int main_gpu;
+ bool no_kv_offload;
+ bool flash_attn;
+ std::vector<float> tensor_split;
+ bool use_mmap;
+ bool embeddings;
+ int n_prompt;
+ int n_gen;
+ std::string test_time;
+ std::vector<uint64_t> samples_ns;
test(const cmd_params_instance & inst, const llama_model * lmodel, const llama_context * ctx) {
model_filename = inst.model;
char buf[128];
llama_model_desc(lmodel, buf, sizeof(buf));
- model_type = buf;
- model_size = llama_model_size(lmodel);
+ model_type = buf;
+ model_size = llama_model_size(lmodel);
model_n_params = llama_model_n_params(lmodel);
- n_batch = inst.n_batch;
- n_ubatch = inst.n_ubatch;
- n_threads = inst.n_threads;
- cpu_mask = inst.cpu_mask;
- cpu_strict = inst.cpu_strict;
- poll = inst.poll;
- type_k = inst.type_k;
- type_v = inst.type_v;
- n_gpu_layers = inst.n_gpu_layers;
- split_mode = inst.split_mode;
- main_gpu = inst.main_gpu;
- no_kv_offload = inst.no_kv_offload;
- flash_attn = inst.flash_attn;
- tensor_split = inst.tensor_split;
- use_mmap = inst.use_mmap;
- embeddings = inst.embeddings;
- n_prompt = inst.n_prompt;
- n_gen = inst.n_gen;
+ n_batch = inst.n_batch;
+ n_ubatch = inst.n_ubatch;
+ n_threads = inst.n_threads;
+ cpu_mask = inst.cpu_mask;
+ cpu_strict = inst.cpu_strict;
+ poll = inst.poll;
+ type_k = inst.type_k;
+ type_v = inst.type_v;
+ n_gpu_layers = inst.n_gpu_layers;
+ split_mode = inst.split_mode;
+ main_gpu = inst.main_gpu;
+ no_kv_offload = inst.no_kv_offload;
+ flash_attn = inst.flash_attn;
+ tensor_split = inst.tensor_split;
+ use_mmap = inst.use_mmap;
+ embeddings = inst.embeddings;
+ n_prompt = inst.n_prompt;
+ n_gen = inst.n_gen;
// RFC 3339 date-time format
- time_t t = time(NULL);
+ time_t t = time(NULL);
std::strftime(buf, sizeof(buf), "%FT%TZ", gmtime(&t));
test_time = buf;
(void) ctx;
}
- uint64_t avg_ns() const {
- return ::avg(samples_ns);
- }
+ uint64_t avg_ns() const { return ::avg(samples_ns); }
- uint64_t stdev_ns() const {
- return ::stdev(samples_ns);
- }
+ uint64_t stdev_ns() const { return ::stdev(samples_ns); }
std::vector<double> get_ts() const {
- int n_tokens = n_prompt + n_gen;
+ int n_tokens = n_prompt + n_gen;
std::vector<double> ts;
- std::transform(samples_ns.begin(), samples_ns.end(), std::back_inserter(ts), [n_tokens](uint64_t t) { return 1e9 * n_tokens / t; });
+ std::transform(samples_ns.begin(), samples_ns.end(), std::back_inserter(ts),
+ [n_tokens](uint64_t t) { return 1e9 * n_tokens / t; });
return ts;
}
- double avg_ts() const {
- return ::avg(get_ts());
- }
+ double avg_ts() const { return ::avg(get_ts()); }
- double stdev_ts() const {
- return ::stdev(get_ts());
- }
+ double stdev_ts() const { return ::stdev(get_ts()); }
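    // note: get_ts() converts each timed sample to tokens/sec (1e9 * n_tokens / t_ns,
    // e.g. 128 tokens in 1e9 ns -> 128 t/s), so avg_ts() and stdev_ts() are statistics
    // over per-run throughput rather than a single rate derived from the averaged time.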
static std::string get_backend() {
std::vector<std::string> backends;
for (size_t i = 0; i < ggml_backend_reg_count(); i++) {
- auto * reg = ggml_backend_reg_get(i);
+ auto * reg = ggml_backend_reg_get(i);
std::string name = ggml_backend_reg_name(reg);
if (name != "CPU") {
backends.push_back(ggml_backend_reg_name(reg));
static const std::vector<std::string> & get_fields() {
static const std::vector<std::string> fields = {
- "build_commit", "build_number",
- "cpu_info", "gpu_info", "backends",
- "model_filename", "model_type", "model_size", "model_n_params",
- "n_batch", "n_ubatch",
- "n_threads", "cpu_mask", "cpu_strict", "poll",
- "type_k", "type_v",
- "n_gpu_layers", "split_mode",
- "main_gpu", "no_kv_offload", "flash_attn",
- "tensor_split", "use_mmap", "embeddings",
- "n_prompt", "n_gen", "test_time",
- "avg_ns", "stddev_ns",
- "avg_ts", "stddev_ts",
+ "build_commit", "build_number", "cpu_info", "gpu_info", "backends", "model_filename",
+ "model_type", "model_size", "model_n_params", "n_batch", "n_ubatch", "n_threads",
+ "cpu_mask", "cpu_strict", "poll", "type_k", "type_v", "n_gpu_layers",
+ "split_mode", "main_gpu", "no_kv_offload", "flash_attn", "tensor_split", "use_mmap",
+ "embeddings", "n_prompt", "n_gen", "test_time", "avg_ns", "stddev_ns",
+ "avg_ts", "stddev_ts",
};
return fields;
}
- enum field_type {STRING, BOOL, INT, FLOAT};
+ enum field_type { STRING, BOOL, INT, FLOAT };
static field_type get_field_type(const std::string & field) {
- if (field == "build_number" || field == "n_batch" || field == "n_ubatch" ||
- field == "n_threads" || field == "poll" ||
- field == "model_size" || field == "model_n_params" ||
- field == "n_gpu_layers" || field == "main_gpu" ||
- field == "n_prompt" || field == "n_gen" ||
- field == "avg_ns" || field == "stddev_ns") {
+ if (field == "build_number" || field == "n_batch" || field == "n_ubatch" || field == "n_threads" ||
+ field == "poll" || field == "model_size" || field == "model_n_params" || field == "n_gpu_layers" ||
+ field == "main_gpu" || field == "n_prompt" || field == "n_gen" || field == "avg_ns" ||
+ field == "stddev_ns") {
return INT;
}
- if (field == "f16_kv" || field == "no_kv_offload" ||
- field == "cpu_strict" ||
- field == "flash_attn" || field == "use_mmap" || field == "embeddings") {
+ if (field == "f16_kv" || field == "no_kv_offload" || field == "cpu_strict" || field == "flash_attn" ||
+ field == "use_mmap" || field == "embeddings") {
return BOOL;
}
if (field == "avg_ts" || field == "stddev_ts") {
std::vector<std::string> get_values() const {
std::string tensor_split_str;
- int max_nonzero = 0;
+ int max_nonzero = 0;
for (size_t i = 0; i < llama_max_devices(); i++) {
if (tensor_split[i] > 0) {
max_nonzero = i;
tensor_split_str += "/";
}
}
- std::vector<std::string> values = {
- build_commit, std::to_string(build_number),
- cpu_info, gpu_info, get_backend(),
- model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
- std::to_string(n_batch), std::to_string(n_ubatch),
- std::to_string(n_threads), cpu_mask, std::to_string(cpu_strict), std::to_string(poll),
- ggml_type_name(type_k), ggml_type_name(type_v),
- std::to_string(n_gpu_layers), split_mode_str(split_mode),
- std::to_string(main_gpu), std::to_string(no_kv_offload), std::to_string(flash_attn),
- tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
- std::to_string(n_prompt), std::to_string(n_gen), test_time,
- std::to_string(avg_ns()), std::to_string(stdev_ns()),
- std::to_string(avg_ts()), std::to_string(stdev_ts())
- };
+ std::vector<std::string> values = { build_commit,
+ std::to_string(build_number),
+ cpu_info,
+ gpu_info,
+ get_backend(),
+ model_filename,
+ model_type,
+ std::to_string(model_size),
+ std::to_string(model_n_params),
+ std::to_string(n_batch),
+ std::to_string(n_ubatch),
+ std::to_string(n_threads),
+ cpu_mask,
+ std::to_string(cpu_strict),
+ std::to_string(poll),
+ ggml_type_name(type_k),
+ ggml_type_name(type_v),
+ std::to_string(n_gpu_layers),
+ split_mode_str(split_mode),
+ std::to_string(main_gpu),
+ std::to_string(no_kv_offload),
+ std::to_string(flash_attn),
+ tensor_split_str,
+ std::to_string(use_mmap),
+ std::to_string(embeddings),
+ std::to_string(n_prompt),
+ std::to_string(n_gen),
+ test_time,
+ std::to_string(avg_ns()),
+ std::to_string(stdev_ns()),
+ std::to_string(avg_ts()),
+ std::to_string(stdev_ts()) };
return values;
}
std::map<std::string, std::string> get_map() const {
std::map<std::string, std::string> map;
- auto fields = get_fields();
- auto values = get_values();
- std::transform(fields.begin(), fields.end(), values.begin(),
- std::inserter(map, map.end()), std::make_pair<const std::string &, const std::string &>);
+ auto fields = get_fields();
+ auto values = get_values();
+ std::transform(fields.begin(), fields.end(), values.begin(), std::inserter(map, map.end()),
+ std::make_pair<const std::string &, const std::string &>);
return map;
}
};
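// note: get_map() pairs get_fields() with get_values() positionally, so the two
// lists must stay in the same order.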
virtual ~printer() {}
FILE * fout;
+
virtual void print_header(const cmd_params & params) { (void) params; }
+
virtual void print_test(const test & t) = 0;
- virtual void print_footer() { }
+
+ virtual void print_footer() {}
};
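// each output format (csv, json, jsonl, md, sql) implements this interface;
// print_header()/print_footer() are optional overrides, print_test() is required.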
struct csv_printer : public printer {
return escaped;
}
- void print_header(const cmd_params & params) override {
+ void print_header(const cmd_params & params) override {
std::vector<std::string> fields = test::get_fields();
fprintf(fout, "%s\n", join(fields, ",").c_str());
(void) params;
}
};
-
static std::string escape_json(const std::string & value) {
std::string escaped;
for (auto c : value) {
escaped += "\\\"";
} else if (c == '\\') {
escaped += "\\\\";
- } else if (c <= 0x1f) {
+        } else if ((unsigned char) c <= 0x1f) { // cast so UTF-8 bytes are not escaped when char is signed
char buf[8];
snprintf(buf, sizeof(buf), "\\u%04x", c);
escaped += buf;
void print_fields(const std::vector<std::string> & fields, const std::vector<std::string> & values) {
assert(fields.size() == values.size());
for (size_t i = 0; i < fields.size(); i++) {
- fprintf(fout, " \"%s\": %s,\n", fields.at(i).c_str(), format_json_value(fields.at(i), values.at(i)).c_str());
+ fprintf(fout, " \"%s\": %s,\n", fields.at(i).c_str(),
+ format_json_value(fields.at(i), values.at(i)).c_str());
}
}
fflush(fout);
}
- void print_footer() override {
- fprintf(fout, "\n]\n");
- }
+ void print_footer() override { fprintf(fout, "\n]\n"); }
};
-
struct jsonl_printer : public printer {
void print_fields(const std::vector<std::string> & fields, const std::vector<std::string> & values) {
assert(fields.size() == values.size());
return 13;
}
- int width = std::max((int)field.length(), 10);
+ int width = std::max((int) field.length(), 10);
if (test::get_field_type(field) == test::STRING) {
return -width;
fprintf(fout, "|");
for (const auto & field : fields) {
std::string value;
- char buf[128];
+ char buf[128];
if (field == "model") {
value = t.model_type;
} else if (field == "size") {
- if (t.model_size < 1024*1024*1024) {
+ if (t.model_size < 1024 * 1024 * 1024) {
snprintf(buf, sizeof(buf), "%.2f MiB", t.model_size / 1024.0 / 1024.0);
} else {
snprintf(buf, sizeof(buf), "%.2f GiB", t.model_size / 1024.0 / 1024.0 / 1024.0);
}
value = buf;
} else if (field == "params") {
- if (t.model_n_params < 1000*1000*1000) {
+ if (t.model_n_params < 1000 * 1000 * 1000) {
snprintf(buf, sizeof(buf), "%.2f M", t.model_n_params / 1e6);
} else {
snprintf(buf, sizeof(buf), "%.2f B", t.model_n_params / 1e9);
std::vector<std::string> fields = test::get_fields();
fprintf(fout, "CREATE TABLE IF NOT EXISTS test (\n");
for (size_t i = 0; i < fields.size(); i++) {
- fprintf(fout, " %s %s%s\n", fields.at(i).c_str(), get_sql_field_type(fields.at(i)).c_str(), i < fields.size() - 1 ? "," : "");
+ fprintf(fout, " %s %s%s\n", fields.at(i).c_str(), get_sql_field_type(fields.at(i)).c_str(),
+ i < fields.size() - 1 ? "," : "");
}
fprintf(fout, ");\n");
fprintf(fout, "\n");
static void test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_threads) {
llama_set_n_threads(ctx, n_threads, n_threads);
- const llama_model * model = llama_get_model(ctx);
- const int32_t n_vocab = llama_n_vocab(model);
+ const llama_model * model = llama_get_model(ctx);
+ const int32_t n_vocab = llama_n_vocab(model);
std::vector<llama_token> tokens(n_batch);
while (n_processed < n_prompt) {
int n_tokens = std::min(n_prompt - n_processed, n_batch);
- tokens[0] = n_processed == 0 && llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab;
+ tokens[0] = n_processed == 0 && llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab;
for (int i = 1; i < n_tokens; i++) {
tokens[i] = std::rand() % n_vocab;
}
static void test_gen(llama_context * ctx, int n_gen, int n_threads) {
llama_set_n_threads(ctx, n_threads, n_threads);
- const llama_model * model = llama_get_model(ctx);
- const int32_t n_vocab = llama_n_vocab(model);
+ const llama_model * model = llama_get_model(ctx);
+ const int32_t n_vocab = llama_n_vocab(model);
llama_token token = llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab;
set_process_priority(params.prio);
// initialize printer
- std::unique_ptr<printer> p = create_printer(params.output_format);
+ std::unique_ptr<printer> p = create_printer(params.output_format);
std::unique_ptr<printer> p_err = create_printer(params.output_format_stderr);
if (p) {
std::vector<cmd_params_instance> params_instances = get_cmd_params_instances(params);
- llama_model * lmodel = nullptr;
+ llama_model * lmodel = nullptr;
const cmd_params_instance * prev_inst = nullptr;
- int params_idx = 0;
+ int params_idx = 0;
auto params_count = params_instances.size();
for (const auto & inst : params_instances) {
- params_idx ++;
+ params_idx++;
if (params.progress) {
            fprintf(stderr, "llama-bench: benchmark %d/%zu: starting\n", params_idx, params_count);
}
tpp.poll = t.poll;
tpp.prio = params.prio;
- struct ggml_threadpool* threadpool = ggml_threadpool_new(&tpp);
+ struct ggml_threadpool * threadpool = ggml_threadpool_new(&tpp);
if (!threadpool) {
fprintf(stderr, "%s: threadpool create failed : n_threads %d\n", __func__, tpp.n_threads);
exit(1);
if (t.n_prompt > 0) {
if (params.progress) {
- fprintf(stderr, "llama-bench: benchmark %d/%ld: prompt run %d/%d\n", params_idx, params_count, i + 1, params.reps);
+                fprintf(stderr, "llama-bench: benchmark %d/%zu: prompt run %d/%d\n", params_idx, params_count,
+                        i + 1, params.reps);
}
test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads);
}
if (t.n_gen > 0) {
if (params.progress) {
- fprintf(stderr, "llama-bench: benchmark %d/%ld: generation run %d/%d\n", params_idx, params_count, i + 1, params.reps);
+                fprintf(stderr, "llama-bench: benchmark %d/%zu: generation run %d/%d\n", params_idx, params_count,
+                        i + 1, params.reps);
}
test_gen(ctx, t.n_gen, t.n_threads);
}