            params.speculative.model.path = value;
        }
    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_MODEL_DRAFT"));
+    add_opt(common_arg(
+        {"-ctkd", "--cache-type-k-draft"}, "TYPE",
+        string_format(
+            "KV cache data type for K for the draft model\n"
+            "allowed values: %s\n"
+            "(default: %s)",
+            get_all_kv_cache_types().c_str(),
+            ggml_type_name(params.speculative.cache_type_k)
+        ),
+        [](common_params & params, const std::string & value) {
+            params.speculative.cache_type_k = kv_cache_type_from_str(value);
+        }
+    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CACHE_TYPE_K_DRAFT"));
+    add_opt(common_arg(
+        {"-ctvd", "--cache-type-v-draft"}, "TYPE",
+        string_format(
+            "KV cache data type for V for the draft model\n"
+            "allowed values: %s\n"
+            "(default: %s)",
+            get_all_kv_cache_types().c_str(),
+            ggml_type_name(params.speculative.cache_type_v)
+        ),
+        [](common_params & params, const std::string & value) {
+            params.speculative.cache_type_v = kv_cache_type_from_str(value);
+        }
+    ).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CACHE_TYPE_V_DRAFT"));
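Both options funnel the TYPE string through `kv_cache_type_from_str`. For reference, here is a minimal sketch of how that helper plausibly behaves, inferred from the allowed values advertised in the help text above; the exact candidate list and error handling are assumptions, not the verbatim implementation:

```cpp
#include <stdexcept>
#include <string>
#include <vector>

#include "ggml.h"

// Sketch (assumption): the candidate list mirrors the allowed values
// printed by the help text.
static const std::vector<ggml_type> kv_cache_types_sketch = {
    GGML_TYPE_F32,    GGML_TYPE_F16,  GGML_TYPE_BF16,
    GGML_TYPE_Q8_0,   GGML_TYPE_Q4_0, GGML_TYPE_Q4_1,
    GGML_TYPE_IQ4_NL, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1,
};

// Sketch (assumption): resolve a TYPE string such as "q8_0" to a ggml_type.
static ggml_type kv_cache_type_from_str_sketch(const std::string & s) {
    for (const ggml_type type : kv_cache_types_sketch) {
        if (ggml_type_name(type) == s) {
            return type;
        }
    }
    // an unknown TYPE fails loudly rather than silently falling back to f16
    throw std::runtime_error("unsupported KV cache type: " + s);
}
```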
    add_opt(common_arg(
        {"-mv", "--model-vocoder"}, "FNAME",
    float p_split = 0.1f;  // speculative decoding split probability
    float p_min   = 0.75f; // minimum speculative decoding probability (greedy)
+    ggml_type cache_type_k = GGML_TYPE_F16; // KV cache data type for the K
+    ggml_type cache_type_v = GGML_TYPE_F16; // KV cache data type for the V
+
    struct cpu_params cpuparams;
    struct cpu_params cpuparams_batch;
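The F16 defaults preserve the current behavior; the point of making the types configurable is memory. A rough back-of-envelope illustration follows, where the draft-model dimensions are hypothetical and the per-element sizes (2 bytes for f16, one 34-byte block per 32 elements for q8_0) follow ggml's block layout:

```cpp
#include <cstdio>

int main() {
    // Hypothetical draft-model dimensions, for illustration only.
    const double n_layer   = 32;
    const double n_embd_kv = 1024; // width of K (or V) per layer
    const double n_ctx     = 8192;

    const double f16_bpe  = 2.0;         // bytes per element, f16
    const double q8_0_bpe = 34.0 / 32.0; // 34-byte block per 32 elements

    const double elems = n_layer * n_embd_kv * n_ctx;
    std::printf("K cache @ f16 : %6.1f MiB\n", elems * f16_bpe  / (1024.0 * 1024.0));
    std::printf("K cache @ q8_0: %6.1f MiB\n", elems * q8_0_bpe / (1024.0 * 1024.0));
    return 0;
}
```

The same arithmetic applies to the V cache, so quantizing both to q8_0 roughly halves the draft model's KV footprint at a given context size.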
| `-devd, --device-draft <dev1,dev2,..>` | comma-separated list of devices to use for offloading the draft model (none = don't offload)<br/>use --list-devices to see a list of available devices |
| `-ngld, --gpu-layers-draft, --n-gpu-layers-draft N` | number of layers to store in VRAM for the draft model<br/>(env: LLAMA_ARG_N_GPU_LAYERS_DRAFT) |
| `-md, --model-draft FNAME` | draft model for speculative decoding (default: unused)<br/>(env: LLAMA_ARG_MODEL_DRAFT) |
+| `-ctkd, --cache-type-k-draft TYPE` | KV cache data type for K for the draft model<br/>allowed values: f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1<br/>(default: f16)<br/>(env: LLAMA_ARG_CACHE_TYPE_K_DRAFT) |
+| `-ctvd, --cache-type-v-draft TYPE` | KV cache data type for V for the draft model<br/>allowed values: f32, f16, bf16, q8_0, q4_0, q4_1, iq4_nl, q5_0, q5_1<br/>(default: f16)<br/>(env: LLAMA_ARG_CACHE_TYPE_V_DRAFT) |
| `-mv, --model-vocoder FNAME` | vocoder model for audio generation (default: unused) |
| `--tts-use-guide-tokens` | Use guide tokens to improve TTS word recall |
| `--embd-bge-small-en-default` | use default bge-small-en-v1.5 model (note: can download weights from the internet) |
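For example, a server run might pair a quantized draft cache with a full-precision main cache (the model paths here are hypothetical): `llama-server -m model-7b.gguf -md draft-0.5b.gguf -ctkd q8_0 -ctvd q8_0 -fa`, or equivalently via `LLAMA_ARG_CACHE_TYPE_K_DRAFT=q8_0` and `LLAMA_ARG_CACHE_TYPE_V_DRAFT=q8_0`. Note that llama.cpp requires flash attention (`-fa`) for a quantized V cache.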
        params_dft.n_ctx        = params_base.speculative.n_ctx == 0 ? params_base.n_ctx / params_base.n_parallel : params_base.speculative.n_ctx;
        params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers;
        params_dft.n_parallel   = 1;
-
-        // force F16 KV cache for the draft model for extra performance
-        params_dft.cache_type_k = GGML_TYPE_F16;
-        params_dft.cache_type_v = GGML_TYPE_F16;
+        params_dft.cache_type_k = params_base.speculative.cache_type_k;
+        params_dft.cache_type_v = params_base.speculative.cache_type_v;

        llama_init_dft = common_init_from_params(params_dft);
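Inside `common_init_from_params`, these fields are expected to be forwarded onto the llama context, so the draft context inherits whatever `-ctkd`/`-ctvd` selected instead of the previously hard-coded F16. A minimal sketch of that plumbing, assuming the usual `common_context_params_to_llama`-style conversion:

```cpp
#include "common.h"
#include "llama.h"

// Sketch (assumption): how the draft params plausibly reach
// llama_context_params when the draft context is created.
static llama_context_params draft_cparams_sketch(const common_params & params_dft) {
    llama_context_params cparams = llama_context_default_params();

    cparams.n_ctx  = params_dft.n_ctx;
    cparams.type_k = params_dft.cache_type_k; // e.g. GGML_TYPE_Q8_0 via -ctkd
    cparams.type_v = params_dft.cache_type_v; // quantized V requires flash attention

    return cparams;
}
```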