- [Trending](https://huggingface.co/models?library=gguf&sort=trending)
- [LLaMA](https://huggingface.co/models?sort=trending&search=llama+gguf)
+You can either manually download the GGUF file or directly use any `llama.cpp`-compatible model from Hugging Face via the CLI argument `-hf <user>/<model>[:quant]`.
+
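+For example, something like the following should work (the repository name and quantization tag are only an illustration; substitute any GGUF repository on Hugging Face):
+
+```bash
+# download the GGUF from Hugging Face (cached locally) and run it
+llama-cli -hf bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M
+```
+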
After downloading a model, use the CLI tools to run it locally - see below.
`llama.cpp` requires the model to be stored in the [GGUF](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md) file format. Models in other data formats can be converted to GGUF using the `convert_*.py` Python scripts in this repo.
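As a rough sketch, a typical conversion might look like the following (the input path and the `--outfile`/`--outtype` values are illustrative assumptions; run the script with `--help` for the full set of options):

```bash
# convert a Hugging Face model directory into a single GGUF file (illustrative paths/flags)
python convert_hf_to_gguf.py ./my-hf-model --outfile my-model-f16.gguf --outtype f16
```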
#### A CLI tool for accessing and experimenting with most of `llama.cpp`'s functionality.
-- <details open>
- <summary>Run simple text completion</summary>
-
- ```bash
- llama-cli -m model.gguf -p "I believe the meaning of life is" -n 128
-
- # I believe the meaning of life is to find your own truth and to live in accordance with it. For me, this means being true to myself and following my passions, even if they don't align with societal expectations. I think that's what I love about yoga – it's not just a physical practice, but a spiritual one too. It's about connecting with yourself, listening to your inner voice, and honoring your own unique journey.
- ```
-
- </details>
-
-- <details>
<summary>Run in conversation mode</summary>
+ Models with a built-in chat template automatically activate conversation mode. If this doesn't happen, you can enable it manually by adding `-cnv` and specifying a suitable chat template with `--chat-template NAME`.
+
```bash
- llama-cli -m model.gguf -p "You are a helpful assistant" -cnv
+ llama-cli -m model.gguf
# > hi, who are you?
# Hi there! I'm your helpful assistant! I'm an AI-powered chatbot designed to assist and provide information to users like you. I'm here to help answer your questions, provide guidance, and offer support on a wide range of topics. I'm a friendly and knowledgeable AI, and I'm always happy to help with anything you need. What's on your mind, and how can I assist you today?
```
</details>
- <details>
- <summary>Run with custom chat template</summary>
+ <summary>Run in conversation mode with custom chat template</summary>
```bash
- # use the "chatml" template
- llama-cli -m model.gguf -p "You are a helpful assistant" -cnv --chat-template chatml
+ # use the "chatml" template (use -h to see the list of supported templates)
+ llama-cli -m model.gguf -cnv --chat-template chatml
# use a custom template
- llama-cli -m model.gguf -p "You are a helpful assistant" -cnv --in-prefix 'User: ' --reverse-prompt 'User:'
+ llama-cli -m model.gguf -cnv --in-prefix 'User: ' --reverse-prompt 'User:'
```
- [Supported templates](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template)
+ </details>
+
+- <details>
+ <summary>Run simple text completion</summary>
+
+ To disable conversation mode explicitly, use `-no-cnv`.
+
+ ```bash
+ llama-cli -m model.gguf -p "I believe the meaning of life is" -n 128 -no-cnv
+
+ # I believe the meaning of life is to find your own truth and to live in accordance with it. For me, this means being true to myself and following my passions, even if they don't align with societal expectations. I think that's what I love about yoga – it's not just a physical practice, but a spiritual one too. It's about connecting with yourself, listening to your inner voice, and honoring your own unique journey.
+ ```
</details>
).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}));
add_opt(common_arg(
{"-cnv", "--conversation"},
- string_format(
- "run in conversation mode:\n"
- "- does not print special tokens and suffix/prefix\n"
- "- interactive mode is also enabled\n"
- "(default: %s)",
- params.conversation ? "true" : "false"
- ),
+ "run in conversation mode:\n"
+ "- does not print special tokens and suffix/prefix\n"
+ "- interactive mode is also enabled\n"
+ "(default: auto enabled if chat template is available)",
+ [](common_params & params) {
+ params.conversation_mode = COMMON_CONVERSATION_MODE_ENABLED;
+ }
+ ).set_examples({LLAMA_EXAMPLE_MAIN}));
+ add_opt(common_arg(
+ {"-no-cnv", "--no-conversation"},
+ "force disable conversation mode (default: false)",
[](common_params & params) {
- params.conversation = true;
+ params.conversation_mode = COMMON_CONVERSATION_MODE_DISABLED;
}
).set_examples({LLAMA_EXAMPLE_MAIN}));
add_opt(common_arg(
DIMRE_METHOD_MEAN,
};
+enum common_conversation_mode {
+ COMMON_CONVERSATION_MODE_DISABLED = 0,
+ COMMON_CONVERSATION_MODE_ENABLED = 1,
+ COMMON_CONVERSATION_MODE_AUTO = 2,
+};
+
// sampling parameters
struct common_params_sampling {
uint32_t seed = LLAMA_DEFAULT_SEED; // the seed used to initialize llama_sampler
bool special = false; // enable special token output
bool interactive = false; // interactive mode
bool interactive_first = false; // wait for user input immediately
- bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix)
bool prompt_cache_all = false; // save user input and generations to prompt cache
bool prompt_cache_ro = false; // open the prompt cache read-only and do not update it
ggml_type cache_type_k = GGML_TYPE_F16; // KV cache data type for the K
ggml_type cache_type_v = GGML_TYPE_F16; // KV cache data type for the V
+ common_conversation_mode conversation_mode = COMMON_CONVERSATION_MODE_AUTO;
+
// multimodal models (see examples/llava)
std::string mmproj = ""; // path to multimodal projector // NOLINT
std::vector<std::string> image; // path to image file(s)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
+static const char * DEFAULT_SYSTEM_MESSAGE = "You are a helpful assistant";
+
static llama_context ** g_ctx;
static llama_model ** g_model;
static common_sampler ** g_smpl;
LOG_WRN("%s: model was trained on only %d context tokens (%d specified)\n", __func__, n_ctx_train, n_ctx);
}
+ // auto enable conversation mode if chat template is available
+ const bool has_chat_template = !common_get_builtin_chat_template(model).empty() || !params.chat_template.empty();
+ if (params.conversation_mode == COMMON_CONVERSATION_MODE_AUTO) {
+ if (has_chat_template) {
+ LOG_INF("%s: chat template is available, enabling conversation mode (disable it with -no-cnv)\n", __func__);
+ params.conversation_mode = COMMON_CONVERSATION_MODE_ENABLED;
+ } else {
+ params.conversation_mode = COMMON_CONVERSATION_MODE_DISABLED;
+ }
+ }
+
+ // in case the user force-activates conversation mode (via -cnv) without a proper chat template, we show a warning
+ if (params.conversation_mode && !has_chat_template) {
+ LOG_WRN("%s: chat template is not available or is not supported. This may cause the model to output suboptimal responses\n", __func__);
+ }
+
// print chat template example in conversation mode
- if (params.conversation) {
+ if (params.conversation_mode) {
if (params.enable_chat_template) {
LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(model, params.chat_template).c_str());
} else {
std::vector<llama_token> embd_inp;
{
- auto prompt = (params.conversation && params.enable_chat_template && !params.prompt.empty())
- ? chat_add_and_format(model, chat_msgs, "system", params.prompt) // format the system prompt in conversation mode
+ auto prompt = (params.conversation_mode && params.enable_chat_template)
+ // format the system prompt in conversation mode (fallback to default if empty)
+ ? chat_add_and_format(model, chat_msgs, "system", params.prompt.empty() ? DEFAULT_SYSTEM_MESSAGE : params.prompt)
+ // otherwise use the prompt as is
: params.prompt;
if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
LOG_DBG("tokenize the prompt\n");
params.n_keep += add_bos; // always keep the BOS token
}
- if (params.conversation) {
+ if (params.conversation_mode) {
params.interactive_first = true;
}
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
LOG_INF( " - Press Ctrl+C to interject at any time.\n");
#endif
- LOG_INF( "%s\n", control_message);
+ LOG_INF( "%s", control_message);
+ if (params.conversation_mode && params.enable_chat_template && params.prompt.empty()) {
+ LOG_INF( " - Using default system message. To change it, set a different value via -p PROMPT or -f FILE argument.\n");
+ }
+ LOG_INF("\n");
is_interacting = params.interactive_first;
}
}
// if current token is not EOG, we add it to current assistant message
- if (params.conversation) {
+ if (params.conversation_mode) {
const auto id = common_sampler_last(smpl);
assistant_ss << common_token_to_piece(ctx, id, false);
}
if (n_past > 0 && is_interacting) {
LOG_DBG("waiting for user input\n");
- if (params.conversation) {
+ if (params.conversation_mode) {
LOG("\n> ");
}
}
std::string buffer;
- if (!params.input_prefix.empty() && !params.conversation) {
+ if (!params.input_prefix.empty() && !params.conversation_mode) {
LOG_DBG("appending input prefix: '%s'\n", params.input_prefix.c_str());
LOG("%s", params.input_prefix.c_str());
}
// Entering an empty line lets the user pass control back
if (buffer.length() > 1) {
// append input suffix if any
- if (!params.input_suffix.empty() && !params.conversation) {
+ if (!params.input_suffix.empty() && !params.conversation_mode) {
LOG_DBG("appending input suffix: '%s'\n", params.input_suffix.c_str());
LOG("%s", params.input_suffix.c_str());
}
string_process_escapes(buffer);
}
- bool format_chat = params.conversation && params.enable_chat_template;
+ bool format_chat = params.conversation_mode && params.enable_chat_template;
std::string user_inp = format_chat
? chat_add_and_format(model, chat_msgs, "user", std::move(buffer))
: std::move(buffer);