## Hot topics
+- A new binary `llama-mtmd-cli` is introduced to replace `llava-cli`, `minicpmv-cli` and `gemma3-cli`; `libllava` will be deprecated https://github.com/ggml-org/llama.cpp/pull/13012
- **How to use [MTLResidencySet](https://developer.apple.com/documentation/metal/mtlresidencyset?language=objc) to keep the GPU memory active?** https://github.com/ggml-org/llama.cpp/pull/11427
- **VS Code extension for FIM completions:** https://github.com/ggml-org/llama.vscode
- Universal [tool call support](./docs/function-calling.md) in `llama-server` https://github.com/ggml-org/llama.cpp/pull/9639
[](common_params & params, const std::string & value) {
params.chat_template = value;
}
- ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
+ ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_LLAVA}).set_env("LLAMA_ARG_CHAT_TEMPLATE"));
add_opt(common_arg(
{"--chat-template-file"}, "JINJA_TEMPLATE_FILE",
string_format(
add_dependencies(mtmd BUILD_INFO)
endif()
-set(TARGET llama-llava-cli)
-add_executable(${TARGET} llava-cli.cpp)
-set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-llava-cli)
-install(TARGETS ${TARGET} RUNTIME)
-target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_17)
-
-set(TARGET llama-minicpmv-cli)
-add_executable(${TARGET} minicpmv-cli.cpp)
-set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-minicpmv-cli)
-install(TARGETS ${TARGET} RUNTIME)
-target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_17)
+add_executable(llama-llava-cli deprecation-warning.cpp)
+add_executable(llama-gemma3-cli deprecation-warning.cpp)
+add_executable(llama-minicpmv-cli deprecation-warning.cpp)
set(TARGET llama-qwen2vl-cli)
add_executable(${TARGET} qwen2vl-cli.cpp)
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_17)
-set(TARGET llama-gemma3-cli)
-add_executable(${TARGET} gemma3-cli.cpp)
-set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-gemma3-cli)
+set(TARGET llama-mtmd-cli)
+add_executable(${TARGET} mtmd-cli.cpp)
+set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-mtmd-cli)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common mtmd ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_17)
--- /dev/null
+#include <cstdio>
+#include <cstdlib> // for EXIT_FAILURE
+#include <string>
+
+int main(int argc, char** argv) {
+ std::string filename = "main";
+ if (argc >= 1) {
+ filename = argv[0];
+ }
+
+ // Get only the program name from the full path
+ size_t pos = filename.find_last_of("/\\");
+ if (pos != std::string::npos) {
+ filename = filename.substr(pos+1);
+ }
+
+ fprintf(stdout, "\n");
+ fprintf(stdout, "WARNING: The binary '%s' is deprecated.\n", filename.c_str());
+ fprintf(stdout, "Please use 'llama-mtmd-cli' instead.\n");
+ fprintf(stdout, "\n");
+
+ return EXIT_FAILURE;
+}
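
Because the three stub targets in the CMake hunk above are all compiled from this single source file, the warning adapts to whichever binary was invoked, via `argv[0]`. Running any of the stubs prints something like the following (illustrative transcript derived from the `fprintf` calls above) and exits with a non-zero status:

```
$ ./build/bin/llama-gemma3-cli

WARNING: The binary 'llama-gemma3-cli' is deprecated.
Please use 'llama-mtmd-cli' instead.

```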
+++ /dev/null
-#include "arg.h"
-#include "log.h"
-#include "common.h"
-#include "sampling.h"
-#include "llama.h"
-#include "ggml.h"
-#include "console.h"
-#include "chat.h"
-#include "mtmd.h"
-
-#include <vector>
-#include <limits.h>
-#include <cinttypes>
-
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
-#include <signal.h>
-#include <unistd.h>
-#elif defined (_WIN32)
-#define WIN32_LEAN_AND_MEAN
-#ifndef NOMINMAX
-#define NOMINMAX
-#endif
-#include <windows.h>
-#include <signal.h>
-#endif
-
-static bool g_is_generating = false;
-
-/**
- * Please note that this is NOT a production-ready stuff.
- * It is a playground for trying Gemma 3 vision capabilities.
- * For contributors: please keep this code simple and easy to understand.
- */
-
-static void show_additional_info(int /*argc*/, char ** argv) {
- LOG(
- "Experimental CLI for using Gemma 3 vision model\n\n"
- "Usage: %s [options] -m <model> --mmproj <mmproj> --image <image> -p <prompt>\n\n"
- " -m and --mmproj are required\n"
- " --image and -p are optional, if NOT provided, the CLI will run in chat mode\n",
- argv[0]
- );
-}
-
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
-static void sigint_handler(int signo) {
- if (signo == SIGINT) {
- if (g_is_generating) {
- g_is_generating = false;
- } else {
- console::cleanup();
- LOG("\nInterrupted by user\n");
- _exit(130);
- }
- }
-}
-#endif
-
-struct gemma3_context {
- mtmd_context_ptr ctx_vision;
- common_init_result llama_init;
-
- llama_model * model;
- llama_context * lctx;
- const llama_vocab * vocab;
- llama_batch batch;
- int n_batch;
-
- // note: we know that gemma3 template is "linear", meaning each turn is completely separated to another
- // so here we don't need to keep track of chat history
- common_chat_templates_ptr tmpls;
-
- int n_threads = 1;
- llama_pos n_past = 0;
-
- gemma3_context(common_params & params) : llama_init(common_init_from_params(params)) {
- model = llama_init.model.get();
- lctx = llama_init.context.get();
- vocab = llama_model_get_vocab(model);
- n_threads = params.cpuparams.n_threads;
- batch = llama_batch_init(params.n_batch, 0, 1);
- n_batch = params.n_batch;
- tmpls = common_chat_templates_init(model, params.chat_template);
- init_vision_context(params);
- }
-
- void init_vision_context(common_params & params) {
- const char * clip_path = params.mmproj.path.c_str();
- ctx_vision.reset(mtmd_init_from_file(clip_path, model, mtmd_context_params{
- /* use_gpu */ true,
- /* timings */ true,
- /* n_threads */ params.cpuparams.n_threads,
- /* verbosity */ GGML_LOG_LEVEL_INFO,
- }));
- if (!ctx_vision.get()) {
- LOG_ERR("Failed to load vision model from %s\n", clip_path);
- exit(1);
- }
- }
-};
-
-struct decode_embd_batch {
- std::vector<llama_pos> pos;
- std::vector<int32_t> n_seq_id;
- std::vector<llama_seq_id> seq_id_0;
- std::vector<llama_seq_id *> seq_ids;
- std::vector<int8_t> logits;
- llama_batch batch;
- decode_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) {
- pos .resize(n_tokens);
- n_seq_id.resize(n_tokens);
- seq_ids .resize(n_tokens + 1);
- logits .resize(n_tokens);
- seq_id_0.resize(1);
- seq_id_0[0] = seq_id;
- seq_ids [n_tokens] = nullptr;
- batch = {
- /*n_tokens =*/ n_tokens,
- /*tokens =*/ nullptr,
- /*embd =*/ embd,
- /*pos =*/ pos.data(),
- /*n_seq_id =*/ n_seq_id.data(),
- /*seq_id =*/ seq_ids.data(),
- /*logits =*/ logits.data(),
- };
- for (int i = 0; i < n_tokens; i++) {
- batch.pos [i] = pos_0 + i;
- batch.n_seq_id[i] = 1;
- batch.seq_id [i] = seq_id_0.data();
- batch.logits [i] = false;
- }
- }
-};
-
-static int generate_response(gemma3_context & ctx, common_sampler * smpl, int n_predict) {
- for (int i = 0; i < n_predict; i++) {
- if (i > n_predict || !g_is_generating) {
- printf("\n");
- break;
- }
-
- llama_token token_id = common_sampler_sample(smpl, ctx.lctx, -1);
- common_sampler_accept(smpl, token_id, true);
-
- if (llama_vocab_is_eog(ctx.vocab, token_id)) {
- printf("\n");
- break; // end of generation
- }
-
- printf("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
- fflush(stdout);
-
- // eval the token
- common_batch_clear(ctx.batch);
- common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true);
- if (llama_decode(ctx.lctx, ctx.batch)) {
- LOG_ERR("failed to decode token\n");
- return 1;
- }
- }
- return 0;
-}
-
-static int eval_message(gemma3_context & ctx, common_chat_msg & msg, std::vector<std::string> & images_fname, bool add_bos = false) {
- std::vector<mtmd_bitmap> bitmaps;
-
- common_chat_templates_inputs tmpl_inputs;
- tmpl_inputs.messages = {msg};
- tmpl_inputs.add_generation_prompt = true;
- tmpl_inputs.use_jinja = false; // jinja is buggy here
- auto formatted_chat = common_chat_templates_apply(ctx.tmpls.get(), tmpl_inputs);
- LOG_DBG("formatted_chat.prompt: %s\n", formatted_chat.prompt.c_str());
-
- for (auto & fname : images_fname) {
- mtmd_bitmap bitmap;
- if (mtmd_helper_bitmap_init_from_file(fname.c_str(), bitmap)) {
- LOG_ERR("Unable to load image %s\n", fname.c_str());
- return 2; // image not found
- }
- bitmaps.push_back(std::move(bitmap));
- }
-
- mtmd_input_text text;
- text.text = formatted_chat.prompt;
- text.add_special = add_bos;
- text.parse_special = true;
- mtmd_input_chunks chunks;
- int32_t res = mtmd_tokenize(ctx.ctx_vision.get(), chunks, text, bitmaps);
- if (res != 0) {
- LOG_ERR("Unable to tokenize prompt, res = %d\n", res);
- return 1;
- }
-
- if (mtmd_helper_eval(ctx.ctx_vision.get(), ctx.lctx, chunks, ctx.n_past, 0, ctx.n_batch)) {
- LOG_ERR("Unable to eval prompt\n");
- return 1;
- }
-
- ctx.n_past += mtmd_helper_get_n_tokens(chunks);
-
- return 0;
-}
-
-int main(int argc, char ** argv) {
- ggml_time_init();
-
- common_params params;
- params.sampling.temp = 0.2; // lower temp by default for better quality
-
- if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
- return 1;
- }
-
- common_init();
-
- if (params.mmproj.path.empty()) {
- show_additional_info(argc, argv);
- return 1;
- }
-
- gemma3_context ctx(params);
- printf("%s: %s\n", __func__, params.model.path.c_str());
-
- bool is_single_turn = !params.prompt.empty() && !params.image.empty();
-
- struct common_sampler * smpl = common_sampler_init(ctx.model, params.sampling);
- int n_predict = params.n_predict < 0 ? INT_MAX : params.n_predict;
-
- // ctrl+C handling
- {
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
- struct sigaction sigint_action;
- sigint_action.sa_handler = sigint_handler;
- sigemptyset (&sigint_action.sa_mask);
- sigint_action.sa_flags = 0;
- sigaction(SIGINT, &sigint_action, NULL);
-#elif defined (_WIN32)
- auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
- return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
- };
- SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
-#endif
- }
-
- if (is_single_turn) {
- g_is_generating = true;
- if (params.prompt.find("<__image__>") == std::string::npos) {
- params.prompt += " <__image__>";
- }
- common_chat_msg msg;
- msg.role = "user";
- msg.content = params.prompt;
- if (eval_message(ctx, msg, params.image, true)) {
- return 1;
- }
- if (generate_response(ctx, smpl, n_predict)) {
- return 1;
- }
-
- } else {
- LOG("\n Running in chat mode, available commands:");
- LOG("\n /image <path> load an image");
- LOG("\n /clear clear the chat history");
- LOG("\n /quit or /exit exit the program");
- LOG("\n");
-
- bool is_first_msg = true;
- std::vector<std::string> images_fname;
- std::string content;
-
- while (true) {
- g_is_generating = false;
- LOG("\n> ");
- console::set_display(console::user_input);
- std::string line;
- console::readline(line, false);
- console::set_display(console::reset);
- line = string_strip(line);
- if (line.empty()) {
- continue;
- }
- if (line == "/quit" || line == "/exit") {
- break;
- }
- if (line == "/clear") {
- ctx.n_past = 0;
- llama_kv_self_seq_rm(ctx.lctx, 0, 1, -1); // keep BOS
- LOG("Chat history cleared\n\n");
- continue;
- }
- g_is_generating = true;
- if (line.find("/image") == 0) {
- std::string image = line.substr(7);
- images_fname.push_back(string_strip(image));
- content += "<__image__>";
- continue;
- } else {
- content += line;
- }
- common_chat_msg msg;
- msg.role = "user";
- msg.content = content;
- int ret = eval_message(ctx, msg, images_fname, is_first_msg);
- if (ret == 2) {
- // non-fatal error
- images_fname.clear();
- content.clear();
- continue;
- }
- if (ret) {
- return 1;
- }
- if (generate_response(ctx, smpl, n_predict)) {
- return 1;
- }
- images_fname.clear();
- content.clear();
- is_first_msg = false;
- }
- }
- llama_perf_context_print(ctx.lctx);
- return 0;
-}
+++ /dev/null
-#include "arg.h"
-#include "base64.hpp"
-#include "log.h"
-#include "common.h"
-#include "sampling.h"
-#include "clip.h"
-#include "llava.h"
-#include "llama.h"
-#include "ggml.h"
-
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-#include <vector>
-
-static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
- int N = (int) tokens.size();
- for (int i = 0; i < N; i += n_batch) {
- int n_eval = (int) tokens.size() - i;
- if (n_eval > n_batch) {
- n_eval = n_batch;
- }
- if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
- LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
- return false;
- }
- *n_past += n_eval;
- }
- return true;
-}
-
-static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
- std::vector<llama_token> tokens;
- tokens.push_back(id);
- return eval_tokens(ctx_llama, tokens, 1, n_past);
-}
-
-static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
- std::string str2 = str;
- std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
- eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
- return true;
-}
-
-static const char * sample(struct common_sampler * smpl,
- struct llama_context * ctx_llama,
- int * n_past) {
- const llama_token id = common_sampler_sample(smpl, ctx_llama, -1);
- common_sampler_accept(smpl, id, true);
-
- const llama_model * model = llama_get_model(ctx_llama);
- const llama_vocab * vocab = llama_model_get_vocab(model);
-
- static std::string ret;
- if (llama_vocab_is_eog(vocab, id)) {
- ret = "</s>";
- } else {
- ret = common_token_to_piece(ctx_llama, id);
- }
- eval_id(ctx_llama, id, n_past);
- return ret.c_str();
-}
-
-static const char* IMG_BASE64_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
-static const char* IMG_BASE64_TAG_END = "\">";
-
-static void find_image_tag_in_prompt(const std::string& prompt, size_t& begin_out, size_t& end_out) {
- begin_out = prompt.find(IMG_BASE64_TAG_BEGIN);
- end_out = prompt.find(IMG_BASE64_TAG_END, (begin_out == std::string::npos) ? 0UL : begin_out);
-}
-
-static bool prompt_contains_image(const std::string& prompt) {
- size_t begin, end;
- find_image_tag_in_prompt(prompt, begin, end);
- return (begin != std::string::npos);
-}
-
-// replaces the base64 image tag in the prompt with `replacement`
-static llava_image_embed * llava_image_embed_make_with_prompt_base64(struct clip_ctx * ctx_clip, int n_threads, const std::string& prompt) {
- size_t img_base64_str_start, img_base64_str_end;
- find_image_tag_in_prompt(prompt, img_base64_str_start, img_base64_str_end);
- if (img_base64_str_start == std::string::npos || img_base64_str_end == std::string::npos) {
- LOG_ERR("%s: invalid base64 image tag. must be %s<base64 byte string>%s\n", __func__, IMG_BASE64_TAG_BEGIN, IMG_BASE64_TAG_END);
- return NULL;
- }
-
- auto base64_bytes_start = img_base64_str_start + strlen(IMG_BASE64_TAG_BEGIN);
- auto base64_bytes_count = img_base64_str_end - base64_bytes_start;
- auto base64_str = prompt.substr(base64_bytes_start, base64_bytes_count );
-
- auto required_bytes = base64::required_encode_size(base64_str.size());
- auto img_bytes = std::vector<unsigned char>(required_bytes);
- base64::decode(base64_str.begin(), base64_str.end(), img_bytes.begin());
-
- auto embed = llava_image_embed_make_with_bytes(ctx_clip, n_threads, img_bytes.data(), img_bytes.size());
- if (!embed) {
- LOG_ERR("%s: could not load image from base64 string.\n", __func__);
- return NULL;
- }
-
- return embed;
-}
-
-static std::string remove_image_from_prompt(const std::string& prompt, const char * replacement = "") {
- size_t begin, end;
- find_image_tag_in_prompt(prompt, begin, end);
- if (begin == std::string::npos || end == std::string::npos) {
- return prompt;
- }
- auto pre = prompt.substr(0, begin);
- auto post = prompt.substr(end + strlen(IMG_BASE64_TAG_END));
- return pre + replacement + post;
-}
-
-struct llava_context {
- struct clip_ctx * ctx_clip = NULL;
- struct llama_context * ctx_llama = NULL;
- struct llama_model * model = NULL;
-};
-
-static void print_usage(int, char ** argv) {
- LOG("\n example usage:\n");
- LOG("\n %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
- LOG("\n note: a lower temperature value like 0.1 is recommended for better quality.\n");
-}
-
-static struct llava_image_embed * load_image(llava_context * ctx_llava, common_params * params, const std::string & fname) {
-
- // load and preprocess the image
- llava_image_embed * embed = NULL;
- auto prompt = params->prompt;
- if (prompt_contains_image(prompt)) {
- if (!params->image.empty()) {
- LOG_INF("using base64 encoded image instead of command line image path\n");
- }
- embed = llava_image_embed_make_with_prompt_base64(ctx_llava->ctx_clip, params->cpuparams.n_threads, prompt);
- if (!embed) {
- LOG_ERR("%s: can't load image from prompt\n", __func__);
- return NULL;
- }
- params->prompt = remove_image_from_prompt(prompt);
- } else {
- embed = llava_image_embed_make_with_filename(ctx_llava->ctx_clip, params->cpuparams.n_threads, fname.c_str());
- if (!embed) {
- fprintf(stderr, "%s: is %s really an image file?\n", __func__, fname.c_str());
- return NULL;
- }
- }
-
- return embed;
-}
-
-static void process_prompt(struct llava_context * ctx_llava, struct llava_image_embed * image_embed, common_params * params, const std::string & prompt) {
- int n_past = 0;
-
- const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;
-
- std::string system_prompt, user_prompt;
- size_t image_pos = prompt.find("<image>");
- if (image_pos != std::string::npos) {
- // new templating mode: Provide the full prompt including system message and use <image> as a placeholder for the image
- system_prompt = prompt.substr(0, image_pos);
- user_prompt = prompt.substr(image_pos + std::string("<image>").length());
- LOG_INF("system_prompt: %s\n", system_prompt.c_str());
- if (params->verbose_prompt) {
- auto tmp = common_tokenize(ctx_llava->ctx_llama, system_prompt, true, true);
- for (int i = 0; i < (int) tmp.size(); i++) {
- LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
- }
- }
- LOG_INF("user_prompt: %s\n", user_prompt.c_str());
- if (params->verbose_prompt) {
- auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
- for (int i = 0; i < (int) tmp.size(); i++) {
- LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
- }
- }
- } else {
- // llava-1.5 native mode
- system_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.\nUSER:";
- user_prompt = prompt + "\nASSISTANT:";
- if (params->verbose_prompt) {
- auto tmp = common_tokenize(ctx_llava->ctx_llama, user_prompt, true, true);
- for (int i = 0; i < (int) tmp.size(); i++) {
- LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx_llava->ctx_llama, tmp[i]).c_str());
- }
- }
- }
-
- eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, true);
- llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
- eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
-
- // generate the response
-
- LOG("\n");
-
- struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sampling);
- if (!smpl) {
- LOG_ERR("%s: failed to initialize sampling subsystem\n", __func__);
- exit(1);
- }
-
- std::string response = "";
- for (int i = 0; i < max_tgt_len; i++) {
- const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past);
- response += tmp;
- if (strcmp(tmp, "</s>") == 0) break;
- if (strstr(tmp, "###")) break; // Yi-VL behavior
- LOG("%s", tmp);
- if (strstr(response.c_str(), "<|im_end|>")) break; // Yi-34B llava-1.6 - for some reason those decode not as the correct token (tokenizer works)
- if (strstr(response.c_str(), "<|im_start|>")) break; // Yi-34B llava-1.6
- if (strstr(response.c_str(), "USER:")) break; // mistral llava-1.6
-
- fflush(stdout);
- }
-
- common_sampler_free(smpl);
- LOG("\n");
-}
-
-static struct llama_model * llava_init(common_params * params) {
- llama_backend_init();
- llama_numa_init(params->numa);
-
- llama_model_params model_params = common_model_params_to_llama(*params);
-
- llama_model * model = llama_model_load_from_file(params->model.path.c_str(), model_params);
- if (model == NULL) {
- LOG_ERR("%s: unable to load model\n" , __func__);
- return NULL;
- }
- return model;
-}
-
-static struct llava_context * llava_init_context(common_params * params, llama_model * model) {
- const char * clip_path = params->mmproj.path.c_str();
-
- auto prompt = params->prompt;
- if (prompt.empty()) {
- prompt = "describe the image in detail.";
- }
-
- auto ctx_clip = clip_model_load(clip_path, GGML_LOG_LEVEL_INFO);
-
- llama_context_params ctx_params = common_context_params_to_llama(*params);
- ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings
-
- llama_context * ctx_llama = llama_init_from_model(model, ctx_params);
-
- if (ctx_llama == NULL) {
- LOG_ERR("%s: failed to create the llama_context\n" , __func__);
- return NULL;
- }
-
- auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));
-
- ctx_llava->ctx_llama = ctx_llama;
- ctx_llava->ctx_clip = ctx_clip;
- ctx_llava->model = model;
- return ctx_llava;
-}
-
-static void llava_free(struct llava_context * ctx_llava) {
- if (ctx_llava->ctx_clip) {
- clip_free(ctx_llava->ctx_clip);
- ctx_llava->ctx_clip = NULL;
- }
-
- llama_free(ctx_llava->ctx_llama);
- llama_model_free(ctx_llava->model);
- llama_backend_free();
-}
-
-int main(int argc, char ** argv) {
- ggml_time_init();
-
- common_params params;
-
- if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, print_usage)) {
- return 1;
- }
-
- common_init();
-
- if (params.mmproj.path.empty() || (params.image.empty() && !prompt_contains_image(params.prompt))) {
- print_usage(argc, argv);
- return 1;
- }
-
- auto * model = llava_init(&params);
- if (model == NULL) {
- fprintf(stderr, "%s: error: failed to init llava model\n", __func__);
- return 1;
- }
-
- if (prompt_contains_image(params.prompt)) {
- auto * ctx_llava = llava_init_context(&params, model);
-
- auto * image_embed = load_image(ctx_llava, &params, "");
-
- // process the prompt
- process_prompt(ctx_llava, image_embed, &params, params.prompt);
-
- llama_perf_context_print(ctx_llava->ctx_llama);
- llava_image_embed_free(image_embed);
- ctx_llava->model = NULL;
- llava_free(ctx_llava);
- } else {
- for (auto & image : params.image) {
- auto * ctx_llava = llava_init_context(&params, model);
-
- auto * image_embed = load_image(ctx_llava, &params, image);
- if (!image_embed) {
- LOG_ERR("%s: failed to load image %s. Terminating\n\n", __func__, image.c_str());
- return 1;
- }
-
- // process the prompt
- process_prompt(ctx_llava, image_embed, &params, params.prompt);
-
- llama_perf_context_print(ctx_llava->ctx_llama);
- llava_image_embed_free(image_embed);
- ctx_llava->model = NULL;
- llava_free(ctx_llava);
- }
- }
-
- llama_model_free(model);
-
- return 0;
-}
+++ /dev/null
-#include "arg.h"
-#include "log.h"
-#include "common.h"
-#include "sampling.h"
-#include "clip.h"
-#include "llava.h"
-#include "llama.h"
-#include "ggml.h"
-
-#include <algorithm>
-#include <cstdio>
-#include <cstdlib>
-#include <cstring>
-#include <vector>
-#include <iostream> // TODO: remove me
-
-struct llava_context {
- struct clip_ctx * ctx_clip = NULL;
- struct llama_context * ctx_llama = NULL;
- struct llama_model * model = NULL;
-};
-
-static void show_additional_info(int /*argc*/, char ** argv) {
- LOG("\nexample usage:\n\n%s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
- LOG("\nnote: a lower temperature value like 0.1 is recommended for better quality.\n");
-}
-
-static struct llama_model * llava_init(common_params * params) {
- llama_backend_init();
- llama_numa_init(params->numa);
-
- llama_model_params model_params = common_model_params_to_llama(*params);
-
- llama_model * model = llama_model_load_from_file(params->model.path.c_str(), model_params);
- if (model == NULL) {
- LOG_ERR("%s: unable to load model\n" , __func__);
- return NULL;
- }
- return model;
-}
-
-static struct llava_context * llava_init_context(common_params * params, llama_model * model) {
- auto prompt = params->prompt;
- if (prompt.empty()) {
- prompt = "describe the image in detail.";
- }
-
- llama_context_params ctx_params = common_context_params_to_llama(*params);
- if (params->n_ctx < 2048) {
- // warn user here, "Image processing requires at least 2048 context, setting context to 2048"
- LOG_WRN("%s: Image processing requires at least 2048 context, setting context to 2048\n" , __func__);
- ctx_params.n_ctx = 2048;
- } else {
- ctx_params.n_ctx = params->n_ctx;
- }
-
- llama_context * ctx_llama = llama_init_from_model(model, ctx_params);
-
- if (ctx_llama == NULL) {
- LOG_ERR("%s: failed to create the llama_context\n" , __func__);
- return NULL;
- }
-
- auto * ctx_llava = (struct llava_context *)malloc(sizeof(llava_context));
-
- ctx_llava->ctx_llama = ctx_llama;
- ctx_llava->model = model;
- return ctx_llava;
-}
-
-static void llava_free(struct llava_context * ctx_llava) {
- if (ctx_llava->ctx_clip) {
- clip_free(ctx_llava->ctx_clip);
- ctx_llava->ctx_clip = NULL;
- }
-
- llama_free(ctx_llava->ctx_llama);
- llama_model_free(ctx_llava->model);
- llama_backend_free();
-}
-
-static struct clip_ctx * clip_init_context(common_params * params) {
- const char * clip_path = params->mmproj.path.c_str();
-
- auto prompt = params->prompt;
- if (prompt.empty()) {
- prompt = "describe the image in detail.";
- }
- struct clip_context_params clip_params = {
- /* use_gpu */ params->n_gpu_layers != 0,
- /* verbosity */ GGML_LOG_LEVEL_INFO, // TODO: make this configurable
- };
- auto * ctx_clip = clip_init(clip_path, clip_params);
- return ctx_clip;
-}
-
-static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_token> tokens, int n_batch, int * n_past) {
- int N = (int) tokens.size();
- for (int i = 0; i < N; i += n_batch) {
- int n_eval = (int) tokens.size() - i;
- if (n_eval > n_batch) {
- n_eval = n_batch;
- }
- if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
- LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
- return false;
- }
- *n_past += n_eval;
- }
- return true;
-}
-
-static bool eval_id(struct llama_context * ctx_llama, int id, int * n_past) {
- std::vector<llama_token> tokens;
- tokens.push_back(id);
- return eval_tokens(ctx_llama, tokens, 1, n_past);
-}
-
-static bool eval_string(struct llama_context * ctx_llama, const char* str, int n_batch, int * n_past, bool add_bos){
- std::string str2 = str;
- std::vector<llama_token> embd_inp = common_tokenize(ctx_llama, str2, add_bos, true);
- return eval_tokens(ctx_llama, embd_inp, n_batch, n_past);
-}
-
-static void process_eval_image_embed(struct llava_context * ctx_llava, const struct llava_image_embed * embeds, int n_batch, int * n_past, int idx) {
- float * image_embed = (float *)malloc(clip_embd_nbytes(ctx_llava->ctx_clip));
- std::memcpy(image_embed, embeds->embed + idx * clip_n_patches(ctx_llava->ctx_clip) * clip_n_mmproj_embd(ctx_llava->ctx_clip), clip_embd_nbytes(ctx_llava->ctx_clip));
-
- auto * slice_embed = (llava_image_embed*)malloc(sizeof(llava_image_embed));
- slice_embed->embed = image_embed;
- slice_embed->n_image_pos = clip_n_patches(ctx_llava->ctx_clip);
- llava_eval_image_embed(ctx_llava->ctx_llama, slice_embed, n_batch, n_past);
- llava_image_embed_free(slice_embed);
-}
-
-static void process_image(struct llava_context * ctx_llava, struct llava_image_embed * embeds, common_params * params, int &n_past) {
- std::string system_prompt;
- int idx = 0;
- int num_image_embeds = embeds->n_image_pos / clip_n_patches(ctx_llava->ctx_clip);
- int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
- if (has_minicpmv_projector == 2) {
- system_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n";
- }
- else if (has_minicpmv_projector == 3) {
- system_prompt = "<|im_start|>user\n";
- }
- else if (has_minicpmv_projector == 4) {
- system_prompt = "<|im_start|>user\n";
- }
- LOG_INF("%s: image token past: %d\n", __func__, n_past);
- eval_string(ctx_llava->ctx_llama, (system_prompt+"<image>").c_str(), params->n_batch, &n_past, false);
- process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
- eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
- if (num_image_embeds > 1) {
- if (has_minicpmv_projector == 2) {
- size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip);
- eval_string(ctx_llava->ctx_llama, std::string("<slice>").c_str(), params->n_batch, &n_past, false);
- for (size_t i = 0; i < (num_image_embeds-1)/num_image_embeds_col; ++i) {
- for (size_t j = 0; j < num_image_embeds_col; ++j) {
- eval_string(ctx_llava->ctx_llama, std::string("<image>").c_str(), params->n_batch, &n_past, false);
- process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
- eval_string(ctx_llava->ctx_llama, std::string("</image>").c_str(), params->n_batch, &n_past, false);
- if (j == num_image_embeds_col - 1) {
- eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false);
- }
- }
- }
- eval_string(ctx_llava->ctx_llama, std::string("</slice>").c_str(), params->n_batch, &n_past, false);
- }
- else if (has_minicpmv_projector == 3 || has_minicpmv_projector == 4) {
- size_t num_image_embeds_col = clip_uhd_num_image_embeds_col(ctx_llava->ctx_clip);
- for (size_t i = 0; i < (num_image_embeds-1)/num_image_embeds_col; ++i) {
- for (size_t j = 0; j < num_image_embeds_col; ++j) {
- eval_string(ctx_llava->ctx_llama, std::string("<slice>").c_str(), params->n_batch, &n_past, false);
- process_eval_image_embed(ctx_llava, embeds, params->n_batch, &n_past, idx++);
- eval_string(ctx_llava->ctx_llama, std::string("</slice>").c_str(), params->n_batch, &n_past, false);
- if (j == num_image_embeds_col - 1) {
- eval_string(ctx_llava->ctx_llama, std::string("\n").c_str(), params->n_batch, &n_past, false);
- }
- }
- }
- }
- }
- LOG_INF("%s: image token past: %d\n", __func__, n_past);
-}
-
-static const char * sample(struct common_sampler * smpl,
- struct llama_context * ctx_llama,
- int * n_past) {
- const llama_token id = common_sampler_sample(smpl, ctx_llama, -1);
- common_sampler_accept(smpl, id, true);
-
- const llama_model * model = llama_get_model(ctx_llama);
- const llama_vocab * vocab = llama_model_get_vocab(model);
-
- static std::string ret;
- if (llama_vocab_is_eog(vocab, id)) {
- ret = "</s>";
- } else {
- ret = common_token_to_piece(ctx_llama, id);
- }
- eval_id(ctx_llama, id, n_past);
- return ret.c_str();
-}
-
-static struct llava_context * minicpmv_init(common_params * params, const std::string & fname, int &n_past){
- auto * ctx_clip = clip_init_context(params);
- auto * embeds = llava_image_embed_make_with_filename(ctx_clip, params->cpuparams.n_threads, fname.c_str());
- if (!embeds) {
- LOG_ERR("failed to load image %s. Terminating\n\n", fname.c_str());
- return NULL;
- }
-
- // process the prompt
- if (params->prompt.empty() && params->interactive == false) {
- LOG_ERR("prompt should be given or interactive mode should be on");
- return NULL;
- }
-
- auto * model = llava_init(params);
- if (model == NULL) {
- fprintf(stderr, "%s: error: failed to init minicpmv model\n", __func__);
- return NULL;
- }
- const int64_t t_llava_init_start_us = ggml_time_us();
- auto * ctx_llava = llava_init_context(params, model);
- ctx_llava->ctx_clip = ctx_clip;
- const int64_t t_llava_init_end_us = ggml_time_us();
- float t_llava_init_ms = (t_llava_init_end_us - t_llava_init_start_us) / 1000.0;
- LOG_INF("%s: llava init in %8.2f ms.\n", __func__, t_llava_init_ms);
-
- const int64_t t_process_image_start_us = ggml_time_us();
- process_image(ctx_llava, embeds, params, n_past);
- const int64_t t_process_image_end_us = ggml_time_us();
- float t_process_image_ms = (t_process_image_end_us - t_process_image_start_us) / 1000.0;
- LOG_INF("%s: llama process image in %8.2f ms.\n", __func__, t_process_image_ms);
-
- llava_image_embed_free(embeds);
- return ctx_llava;
-}
-
-static struct common_sampler * llama_init(struct llava_context * ctx_llava, common_params * params, const std::string & prompt, int & n_past, bool is_first = false){
- std::string user_prompt = prompt;
- int has_minicpmv_projector = clip_is_minicpmv(ctx_llava->ctx_clip);
- if (!is_first) {
- if (has_minicpmv_projector == 2) {
- user_prompt = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n" + prompt;
- }
- else if (has_minicpmv_projector == 3) {
- user_prompt = "<|im_start|>user\n" + prompt;
- }
- else if (has_minicpmv_projector == 4) {
- user_prompt = "<|im_start|>user\n" + prompt;
- }
- }
-
- eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
- if (has_minicpmv_projector == 2) {
- eval_string(ctx_llava->ctx_llama, "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", params->n_batch, &n_past, false);
- }
- else if (has_minicpmv_projector == 3) {
- eval_string(ctx_llava->ctx_llama, "<|im_end|><|im_start|>assistant\n", params->n_batch, &n_past, false);
- }
- else if (has_minicpmv_projector == 4) {
- eval_string(ctx_llava->ctx_llama, "<|im_end|><|im_start|>assistant\n", params->n_batch, &n_past, false);
- }
-
- // generate the response
-
- LOG_INF("\n");
-
- struct common_sampler * smpl = common_sampler_init(ctx_llava->model, params->sampling);
- return smpl;
-}
-
-static const char * llama_loop(struct llava_context * ctx_llava,struct common_sampler * smpl, int &n_past){
-
- const char * tmp = sample(smpl, ctx_llava->ctx_llama, &n_past);
- return tmp;
-}
-
-int main(int argc, char ** argv) {
- ggml_time_init();
-
- common_params params;
-
- if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
- return 1;
- }
-
- common_init();
-
- if (params.mmproj.path.empty() || (params.image.empty())) {
- show_additional_info(argc, argv);
- return 1;
- }
-
- for (auto & image : params.image) {
- int n_past = 0;
- auto * ctx_llava = minicpmv_init(&params, image, n_past);
-
- if (!params.prompt.empty()) {
- LOG("<user>%s\n", params.prompt.c_str());
- LOG("<assistant>");
- auto * smpl = llama_init(ctx_llava, &params, params.prompt, n_past, true);
- const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
- std::string response;
- bool have_tmp = false;
- for (int i = 0; i < max_tgt_len; i++) {
- const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
- response += tmp;
- if (strcmp(tmp, "</s>") == 0){
- if (!have_tmp) {
- continue;
- }
- break;
- }
- if (strstr(tmp, "###")) break; // Yi-VL behavior
- have_tmp = true;
- printf("%s", tmp);
- if (strstr(response.c_str(), "<user>")) break; // minicpm-v
-
- fflush(stdout);
- }
- common_sampler_free(smpl);
- }else {
- while (true) {
- LOG("<user>");
- std::string prompt;
- std::getline(std::cin, prompt);
- LOG("<assistant>");
- auto * smpl = llama_init(ctx_llava, &params, prompt, n_past, true);
- const int max_tgt_len = params.n_predict < 0 ? 256 : params.n_predict;
- std::string response;
- for (int i = 0; i < max_tgt_len; i++) {
- const auto * tmp = llama_loop(ctx_llava, smpl, n_past);
- response += tmp;
- if (strcmp(tmp, "</s>") == 0) break;
- printf("%s", tmp);// mistral llava-1.6
- if (strstr(response.c_str(), "<user>")) break; // minicpm-v
- fflush(stdout);
- }
- common_sampler_free(smpl);
- }
- }
- printf("\n");
- llama_perf_context_print(ctx_llava->ctx_llama);
-
- ctx_llava->model = NULL;
- llava_free(ctx_llava);
- }
-
- return 0;
-}
--- /dev/null
+#include "arg.h"
+#include "log.h"
+#include "common.h"
+#include "sampling.h"
+#include "llama.h"
+#include "ggml.h"
+#include "console.h"
+#include "chat.h"
+#include "mtmd.h"
+
+#include <vector>
+#include <limits.h>
+#include <cinttypes>
+
+#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
+#include <signal.h>
+#include <unistd.h>
+#elif defined (_WIN32)
+#define WIN32_LEAN_AND_MEAN
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#include <windows.h>
+#include <signal.h>
+#endif
+
+static bool g_is_generating = false;
+
+/**
+ * Please note that this is NOT production-ready code.
+ * It is a playground for trying multimodal support in llama.cpp.
+ * For contributors: please keep this code simple and easy to understand.
+ */
+
+static void show_additional_info(int /*argc*/, char ** argv) {
+ LOG(
+ "Experimental CLI for multimodal models\n\n"
+ "Usage: %s [options] -m <model> --mmproj <mmproj> --image <image> -p <prompt>\n\n"
+ " -m and --mmproj are required\n"
+ " -hf user/repo can replace both -m and --mmproj in most cases\n"
+ " --image and -p are optional, if NOT provided, the CLI will run in chat mode\n",
+ argv[0]
+ );
+}
+
+#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
+static void sigint_handler(int signo) {
+ if (signo == SIGINT) {
+ if (g_is_generating) {
+ g_is_generating = false;
+ } else {
+ console::cleanup();
+ LOG("\nInterrupted by user\n");
+ _exit(130);
+ }
+ }
+}
+#endif
+
+struct mtmd_cli_context {
+ mtmd_context_ptr ctx_vision;
+ common_init_result llama_init;
+
+ llama_model * model;
+ llama_context * lctx;
+ const llama_vocab * vocab;
+ llama_batch batch;
+ int n_batch;
+
+ // note: the gemma3 template is "linear", meaning each turn is completely separate from the others,
+ // so we don't need to keep track of the chat history here
+ common_chat_templates_ptr tmpls;
+
+ // support for legacy templates (models not having EOT token)
+ llama_tokens antiprompt_tokens;
+
+ int n_threads = 1;
+ llama_pos n_past = 0;
+
+ mtmd_cli_context(common_params & params) : llama_init(common_init_from_params(params)) {
+ model = llama_init.model.get();
+ lctx = llama_init.context.get();
+ vocab = llama_model_get_vocab(model);
+ n_threads = params.cpuparams.n_threads;
+ batch = llama_batch_init(params.n_batch, 0, 1);
+ n_batch = params.n_batch;
+
+ if (!llama_model_chat_template(model, nullptr) && params.chat_template.empty()) {
+ LOG_ERR("Model does not have chat template.\n");
+ LOG_ERR(" For old llava models, you may need to use '--chat-template vicuna'\n");
+ LOG_ERR(" For MobileVLM models, use '--chat-template deepseek'\n");
+ exit(1);
+ }
+
+ tmpls = common_chat_templates_init(model, params.chat_template);
+ LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(tmpls.get(), params.use_jinja).c_str());
+
+ init_vision_context(params);
+
+ // load antiprompt tokens for legacy templates
+ if (params.chat_template == "vicuna") {
+ antiprompt_tokens = common_tokenize(lctx, "ASSISTANT:", false, true);
+ } else if (params.chat_template == "deepseek") {
+ antiprompt_tokens = common_tokenize(lctx, "###", false, true);
+ }
+ }
+
+ void init_vision_context(common_params & params) {
+ const char * clip_path = params.mmproj.path.c_str();
+ ctx_vision.reset(mtmd_init_from_file(clip_path, model, mtmd_context_params{
+ /* use_gpu */ true,
+ /* timings */ true,
+ /* n_threads */ params.cpuparams.n_threads,
+ /* verbosity */ GGML_LOG_LEVEL_INFO,
+ }));
+ if (!ctx_vision.get()) {
+ LOG_ERR("Failed to load vision model from %s\n", clip_path);
+ exit(1);
+ }
+ }
+
+ bool check_antiprompt(const llama_tokens & generated_tokens) {
+ if (antiprompt_tokens.empty() || generated_tokens.size() < antiprompt_tokens.size()) {
+ return false;
+ }
+ return std::equal(
+ generated_tokens.end() - antiprompt_tokens.size(),
+ generated_tokens.end(),
+ antiprompt_tokens.begin()
+ );
+ }
+};
+
+struct decode_embd_batch {
+ std::vector<llama_pos> pos;
+ std::vector<int32_t> n_seq_id;
+ std::vector<llama_seq_id> seq_id_0;
+ std::vector<llama_seq_id *> seq_ids;
+ std::vector<int8_t> logits;
+ llama_batch batch;
+ decode_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) {
+ pos .resize(n_tokens);
+ n_seq_id.resize(n_tokens);
+ seq_ids .resize(n_tokens + 1);
+ logits .resize(n_tokens);
+ seq_id_0.resize(1);
+ seq_id_0[0] = seq_id;
+ seq_ids [n_tokens] = nullptr;
+ batch = {
+ /*n_tokens =*/ n_tokens,
+ /*tokens =*/ nullptr,
+ /*embd =*/ embd,
+ /*pos =*/ pos.data(),
+ /*n_seq_id =*/ n_seq_id.data(),
+ /*seq_id =*/ seq_ids.data(),
+ /*logits =*/ logits.data(),
+ };
+ for (int i = 0; i < n_tokens; i++) {
+ batch.pos [i] = pos_0 + i;
+ batch.n_seq_id[i] = 1;
+ batch.seq_id [i] = seq_id_0.data();
+ batch.logits [i] = false;
+ }
+ }
+};
+
+static int generate_response(mtmd_cli_context & ctx, common_sampler * smpl, int n_predict) {
+ llama_tokens generated_tokens;
+ for (int i = 0; i < n_predict; i++) {
+ if (!g_is_generating) {
+ printf("\n");
+ break;
+ }
+
+ llama_token token_id = common_sampler_sample(smpl, ctx.lctx, -1);
+ generated_tokens.push_back(token_id);
+ common_sampler_accept(smpl, token_id, true);
+
+ if (llama_vocab_is_eog(ctx.vocab, token_id) || ctx.check_antiprompt(generated_tokens)) {
+ printf("\n");
+ break; // end of generation
+ }
+
+ printf("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
+ fflush(stdout);
+
+ // eval the token
+ common_batch_clear(ctx.batch);
+ common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true);
+ if (llama_decode(ctx.lctx, ctx.batch)) {
+ LOG_ERR("failed to decode token\n");
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg, std::vector<std::string> & images_fname, bool add_bos = false) {
+ std::vector<mtmd_bitmap> bitmaps;
+
+ common_chat_templates_inputs tmpl_inputs;
+ tmpl_inputs.messages = {msg};
+ tmpl_inputs.add_generation_prompt = true;
+ tmpl_inputs.use_jinja = false; // jinja is buggy here
+ auto formatted_chat = common_chat_templates_apply(ctx.tmpls.get(), tmpl_inputs);
+ LOG_DBG("formatted_chat.prompt: %s\n", formatted_chat.prompt.c_str());
+
+ for (auto & fname : images_fname) {
+ mtmd_bitmap bitmap;
+ if (mtmd_helper_bitmap_init_from_file(fname.c_str(), bitmap)) {
+ LOG_ERR("Unable to load image %s\n", fname.c_str());
+ return 2; // image not found
+ }
+ bitmaps.push_back(std::move(bitmap));
+ }
+
+ mtmd_input_text text;
+ text.text = formatted_chat.prompt;
+ text.add_special = add_bos;
+ text.parse_special = true;
+ mtmd_input_chunks chunks;
+ int32_t res = mtmd_tokenize(ctx.ctx_vision.get(), chunks, text, bitmaps);
+ if (res != 0) {
+ LOG_ERR("Unable to tokenize prompt, res = %d\n", res);
+ return 1;
+ }
+
+ if (mtmd_helper_eval(ctx.ctx_vision.get(), ctx.lctx, chunks, ctx.n_past, 0, ctx.n_batch)) {
+ LOG_ERR("Unable to eval prompt\n");
+ return 1;
+ }
+
+ ctx.n_past += mtmd_helper_get_n_tokens(chunks);
+
+ return 0;
+}
+
+int main(int argc, char ** argv) {
+ ggml_time_init();
+
+ common_params params;
+ params.sampling.temp = 0.2; // lower temp by default for better quality
+
+ if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
+ return 1;
+ }
+
+ common_init();
+
+ if (params.mmproj.path.empty()) {
+ show_additional_info(argc, argv);
+ return 1;
+ }
+
+ mtmd_cli_context ctx(params);
+ printf("%s: %s\n", __func__, params.model.path.c_str());
+
+ bool is_single_turn = !params.prompt.empty() && !params.image.empty();
+
+ struct common_sampler * smpl = common_sampler_init(ctx.model, params.sampling);
+ int n_predict = params.n_predict < 0 ? INT_MAX : params.n_predict;
+
+ // ctrl+C handling
+ {
+#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
+ struct sigaction sigint_action;
+ sigint_action.sa_handler = sigint_handler;
+ sigemptyset (&sigint_action.sa_mask);
+ sigint_action.sa_flags = 0;
+ sigaction(SIGINT, &sigint_action, NULL);
+#elif defined (_WIN32)
+ auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
+ return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
+ };
+ SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
+#endif
+ }
+
+ if (is_single_turn) {
+ g_is_generating = true;
+ if (params.prompt.find("<__image__>") == std::string::npos) {
+ params.prompt += " <__image__>";
+ }
+ common_chat_msg msg;
+ msg.role = "user";
+ msg.content = params.prompt;
+ if (eval_message(ctx, msg, params.image, true)) {
+ return 1;
+ }
+ if (generate_response(ctx, smpl, n_predict)) {
+ return 1;
+ }
+
+ } else {
+ LOG("\n Running in chat mode, available commands:");
+ LOG("\n /image <path> load an image");
+ LOG("\n /clear clear the chat history");
+ LOG("\n /quit or /exit exit the program");
+ LOG("\n");
+
+ bool is_first_msg = true;
+ std::vector<std::string> images_fname;
+ std::string content;
+
+ while (true) {
+ g_is_generating = false;
+ LOG("\n> ");
+ console::set_display(console::user_input);
+ std::string line;
+ console::readline(line, false);
+ console::set_display(console::reset);
+ line = string_strip(line);
+ if (line.empty()) {
+ continue;
+ }
+ if (line == "/quit" || line == "/exit") {
+ break;
+ }
+ if (line == "/clear") {
+ ctx.n_past = 0;
+ llama_kv_self_seq_rm(ctx.lctx, 0, 1, -1); // keep BOS
+ LOG("Chat history cleared\n\n");
+ continue;
+ }
+ g_is_generating = true;
+ if (line.find("/image") == 0) {
+ std::string image = line.substr(7);
+ images_fname.push_back(string_strip(image));
+ content += "<__image__>";
+ continue;
+ } else {
+ content += line;
+ }
+ common_chat_msg msg;
+ msg.role = "user";
+ msg.content = content;
+ int ret = eval_message(ctx, msg, images_fname, is_first_msg);
+ if (ret == 2) {
+ // non-fatal error
+ images_fname.clear();
+ content.clear();
+ continue;
+ }
+ if (ret) {
+ return 1;
+ }
+ if (generate_response(ctx, smpl, n_predict)) {
+ return 1;
+ }
+ images_fname.clear();
+ content.clear();
+ is_first_msg = false;
+ }
+ }
+ llama_perf_context_print(ctx.lctx);
+ return 0;
+}
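
A note on the `decode_embd_batch` helper above: it builds an embeddings-only `llama_batch` (`tokens = nullptr`, `embd` set) so that projector output can be passed straight to `llama_decode`. A minimal usage sketch, assuming a raw `mtmd_context * ctx_vision`, a `const mtmd_image_tokens * image_tokens` that was just encoded with `mtmd_encode`, plus the usual `lctx`/`n_past` variables (these names are assumptions, mirroring the image branch of `mtmd_helper_eval` below):

```cpp
// sketch only: feed precomputed image embeddings into the text model
float * embd     = mtmd_get_output_embd(ctx_vision);             // n_tokens x n_embd floats
int32_t n_tokens = mtmd_image_tokens_get_n_tokens(image_tokens);

// wrap the raw embeddings in a batch starting at position n_past, sequence 0
decode_embd_batch batch_img(embd, n_tokens, n_past, 0);
if (llama_decode(lctx, batch_img.batch) != 0) {
    LOG_ERR("failed to decode image\n");
}
n_past += n_tokens; // text decoding continues after the image
```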
#include <limits>
#include <vector>
+// slice template, used by some llava-uhd models to correctly place the special tokens around image embeddings
+// models not having it (llava-1.6) will process embeddings without any special tokens in-between
+enum mtmd_slice_tmpl {
+ MTMD_SLICE_TMPL_NONE,
+ MTMD_SLICE_TMPL_MINICPMV_2_5,
+ MTMD_SLICE_TMPL_MINICPMV_2_6,
+ // TODO @ngxson : add support for idefics (SmolVLM)
+};
+
struct mtmd_context {
struct clip_ctx * ctx_clip;
const struct llama_model * text_model;
int n_threads;
std::string image_marker;
+ // for minicpmv, we need special tokens in-between slices
+ mtmd_slice_tmpl slice_tmpl = MTMD_SLICE_TMPL_NONE;
+ llama_token tok_ov_img_start = LLAMA_TOKEN_NULL; // overview image
+ llama_token tok_ov_img_end = LLAMA_TOKEN_NULL; // overview image
+ llama_token tok_slices_start = LLAMA_TOKEN_NULL; // start of all slices
+ llama_token tok_slices_end = LLAMA_TOKEN_NULL; // end of all slices
+ llama_token tok_sli_img_start = LLAMA_TOKEN_NULL; // single slice
+ llama_token tok_sli_img_end = LLAMA_TOKEN_NULL; // single slice
+ llama_token tok_row_end = LLAMA_TOKEN_NULL; // end of row
+
// TODO @ngxson : add timings
mtmd_context(const char * mmproj_fname,
throw std::runtime_error(string_format("Failed to load CLIP model from %s\n", mmproj_fname));
}
this->text_model = text_model;
+
+ GGML_ASSERT(!clip_is_qwen2vl(ctx_clip) && "Qwen2VL model is not supported yet, use llama-qwen2vl-cli instead");
+
+ int minicpmv_version = clip_is_minicpmv(ctx_clip);
+ if (minicpmv_version == 2) {
+ // minicpmv 2.5 format:
+ // <image> (overview) </image><slice><image> (slice) </image><image> (slice) </image>\n ... </slice>
+ slice_tmpl = MTMD_SLICE_TMPL_MINICPMV_2_5;
+ tok_ov_img_start = lookup_token("<image>");
+ tok_ov_img_end = lookup_token("</image>");
+ tok_slices_start = lookup_token("<slice>");
+ tok_slices_end = lookup_token("</slice>");
+ tok_sli_img_start = tok_ov_img_start;
+ tok_sli_img_end = tok_ov_img_end;
+ tok_row_end = lookup_token("\n");
+
+ } else if (minicpmv_version == 3 || minicpmv_version == 4) {
+ // minicpmv 2.6 format:
+ // <image> (overview) </image><slice> (slice) </slice><slice> (slice) </slice>\n ...
+ slice_tmpl = MTMD_SLICE_TMPL_MINICPMV_2_6;
+ tok_ov_img_start = lookup_token("<image>");
+ tok_ov_img_end = lookup_token("</image>");
+ tok_sli_img_start = lookup_token("<slice>");
+ tok_sli_img_end = lookup_token("</slice>");
+ tok_row_end = lookup_token("\n");
+
+ } else if (minicpmv_version != 0) {
+ GGML_ASSERT(false && "unsupported minicpmv version");
+ }
}
~mtmd_context() {
clip_free(ctx_clip);
}
+
+private:
+ llama_token lookup_token(const std::string & token_text) {
+ const llama_vocab * vocab = llama_model_get_vocab(text_model);
+ const int n_vocab = llama_vocab_n_tokens(vocab);
+ for (int i = 0; i < n_vocab; i++) {
+ if (token_to_piece(vocab, i, true) == token_text) {
+ return i;
+ }
+ }
+ return LLAMA_TOKEN_NULL;
+ }
+
+ std::string token_to_piece(const llama_vocab * vocab, llama_token token, bool special) {
+ std::string piece;
+ piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\0'
+ const int n_chars = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
+ if (n_chars < 0) {
+ piece.resize(-n_chars);
+ int check = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special);
+ GGML_ASSERT(check == -n_chars);
+ } else {
+ piece.resize(n_chars);
+ }
+ return piece;
+ }
};
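
To make the slice template concrete: for a MiniCPM-V 2.5 image that produces one overview plus a 2x2 grid of slices, the tokenizer below emits the chunks in the following order (a worked example of the format sketched in the constructor comments; line breaks are for readability only, and `\n` marks `tok_row_end`, which only separates rows):

```
<image> (overview embeddings) </image>
<slice>
<image> (slice 0,0) </image><image> (slice 0,1) </image> \n
<image> (slice 1,0) </image><image> (slice 1,1) </image>
</slice>
```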
struct mtmd_image_tokens_data {
std::string prompt_modified(text.text);
std::string marker_modified(ctx->image_marker);
- projector_type proj_type = clip_get_projector_type(ctx->ctx_clip);
// a bit hacky here, but works for now
// for some models, we need to add prefix and suffix to the image embeddings
- if (proj_type == PROJECTOR_TYPE_GEMMA3) {
+ if (clip_is_gemma3(ctx->ctx_clip)) {
+ // gemma 3
// <start_of_image> ... (image embeddings) ... <end_of_image>
marker_modified = "<start_of_image>" + ctx->image_marker + "<end_of_image>";
string_replace_all(prompt_modified, ctx->image_marker, marker_modified);
}
+ // llava-1.5, llava-1.6, Yi-VL, Yi-34B, granite: don't need to add prefix and suffix
+ // for glm-edge, we don't need to add because the tokens are already in the returned embeddings
+
+ // TODO @ngxson : glm-edge : remove BOI / EOI tokens embeddings, decode them as normal tokens
+
std::vector<std::string> parts = string_split_str(prompt_modified, ctx->image_marker);
output.clear();
output.reserve(parts.size());
size_t i_img = 0;
+ // utility for adding raw tokens
+ auto add_text_chunk = [&output](std::vector<llama_token> && tokens) {
+ mtmd_input_chunk chunk{
+ MTMD_INPUT_CHUNK_TYPE_TEXT,
+ std::move(tokens),
+ {},
+ };
+ output.emplace_back(std::move(chunk));
+ };
+
+ // utility for splitting batch of multiple images into chunks of batch having single images
+ auto split_batch_to_chunk = [&ctx](clip_image_f32_batch && batch_f32, const std::string & id) {
+ std::vector<mtmd_input_chunk> chunks;
+
+ for (auto & entry : batch_f32.entries) {
+ mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
+ image_tokens->nx = clip_n_patches(ctx->ctx_clip);
+ image_tokens->ny = 1;
+ image_tokens->batch_f32.entries.push_back(std::move(entry));
+ image_tokens->id = id;
+
+ mtmd_input_chunk chunk{
+ MTMD_INPUT_CHUNK_TYPE_IMAGE,
+ {},
+ std::move(image_tokens),
+ };
+ chunks.emplace_back(std::move(chunk));
+ }
+
+ return chunks;
+ };
+
for (const auto & part : parts) {
//printf("tokenizing part: %s\n", part.c_str());
bool add_bos = &parts.front() == &part;
return 1;
}
- // shim layer
+ // convert mtmd_bitmap to clip_image_u8
clip_image_u8_ptr img_u8(clip_image_u8_init());
img_u8->nx = bitmaps[i_img].nx;
img_u8->ny = bitmaps[i_img].ny;
img_u8->buf.resize(bitmaps[i_img].data.size());
std::memcpy(img_u8->buf.data(), bitmaps[i_img].data.data(), img_u8->nx * img_u8->ny * 3);
+ clip_image_size img_u8_size{img_u8->nx, img_u8->ny};
// preprocess image
clip_image_f32_batch batch_f32;
return 2;
}
- mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
- image_tokens->nx = clip_n_patches(ctx->ctx_clip); // TODO @ngxson : use clip_n_patches_by_image
- image_tokens->ny = 1; // TODO
- image_tokens->batch_f32 = std::move(batch_f32);
- image_tokens->id = bitmaps[i_img].id; // optional
+ if (ctx->slice_tmpl == MTMD_SLICE_TMPL_MINICPMV_2_5 || ctx->slice_tmpl == MTMD_SLICE_TMPL_MINICPMV_2_6) {
+ // split batch into chunks of single images
+ auto chunks = split_batch_to_chunk(std::move(batch_f32), bitmaps[i_img].id);
+ GGML_ASSERT(chunks.size() > 0);
+
+ // add overview image
+ add_text_chunk({ctx->tok_ov_img_start});
+ output.emplace_back(std::move(chunks.front()));
+ chunks.erase(chunks.begin());
+ add_text_chunk({ctx->tok_ov_img_end});
+
+ // add slices
+ if (!chunks.empty()) {
+ clip_add_load_image_size(ctx->ctx_clip, &img_u8_size);
+ int n_col = clip_uhd_num_image_embeds_col(ctx->ctx_clip);
+ int n_row = (int)chunks.size() / n_col;
+ GGML_ASSERT(n_row * n_col == (int)chunks.size());
+ if (ctx->tok_slices_start != LLAMA_TOKEN_NULL) {
+ add_text_chunk({ctx->tok_slices_start});
+ }
+ for (int y = 0; y < n_row; y++) {
+ for (int x = 0; x < n_col; x++) {
+ if (ctx->tok_sli_img_start != LLAMA_TOKEN_NULL) {
+ add_text_chunk({ctx->tok_sli_img_start});
+ }
+ output.emplace_back(std::move(chunks[y * n_col + x]));
+ if (ctx->tok_sli_img_end != LLAMA_TOKEN_NULL) {
+ add_text_chunk({ctx->tok_sli_img_end});
+ }
+ }
+ if (ctx->tok_row_end != LLAMA_TOKEN_NULL && y != n_row - 1) {
+ add_text_chunk({ctx->tok_row_end});
+ }
+ }
+ if (ctx->tok_slices_end != LLAMA_TOKEN_NULL) {
+ add_text_chunk({ctx->tok_slices_end});
+ }
+ }
+
+ } else {
+ mtmd_image_tokens_ptr image_tokens(new mtmd_image_tokens);
+ image_tokens->nx = clip_n_patches(ctx->ctx_clip) * batch_f32.entries.size(); // TODO @ngxson : use clip_n_patches_by_image
+ image_tokens->ny = 1; // TODO
+ image_tokens->batch_f32 = std::move(batch_f32);
+ image_tokens->id = bitmaps[i_img].id; // optional
+
+ LOG_DBG("image_tokens->nx = %d\n", image_tokens->nx);
+ LOG_DBG("image_tokens->ny = %d\n", image_tokens->ny);
+ LOG_DBG("batch_f32 size = %d\n", (int)image_tokens->batch_f32.entries.size());
+
+ if (clip_is_glm(ctx->ctx_clip)) {
+ // glm-edge
+ image_tokens->nx += 2; // add 2 for the begin_of_image and end_of_image token embeddings
+ }
+
+ mtmd_input_chunk chunk{
+ MTMD_INPUT_CHUNK_TYPE_IMAGE,
+ {},
+ std::move(image_tokens),
+ };
+ output.emplace_back(std::move(chunk));
+ }
- mtmd_input_chunk chunk{
- MTMD_INPUT_CHUNK_TYPE_IMAGE,
- {},
- std::move(image_tokens),
- };
- output.emplace_back(std::move(chunk));
- i_img++;
+ i_img++; // move to next image
}
}
int32_t mtmd_encode(mtmd_context * ctx, const mtmd_image_tokens * image_tokens) {
int n_mmproj_embd = clip_n_mmproj_embd(ctx->ctx_clip);
ctx->image_embd_v.resize(image_tokens->n_tokens() * n_mmproj_embd);
- bool ok = clip_image_batch_encode(
- ctx->ctx_clip,
- ctx->n_threads,
- &image_tokens->batch_f32,
- ctx->image_embd_v.data());
+ bool ok = false;
+
+ // only effective for minicpmv and qwen2vl; other models will ignore load_image_size
+ {
+ clip_image_size slice_size{
+ image_tokens->batch_f32.entries[0]->nx,
+ image_tokens->batch_f32.entries[0]->ny};
+ clip_add_load_image_size(ctx->ctx_clip, &slice_size);
+ }
+
+ if (clip_is_llava(ctx->ctx_clip) || clip_is_minicpmv(ctx->ctx_clip) || clip_is_glm(ctx->ctx_clip)) {
+ // TODO @ngxson : llava does not support batched encoding; this should be fixed inside clip_image_batch_encode()
+ const auto & entries = image_tokens->batch_f32.entries;
+ for (size_t i = 0; i < entries.size(); i++) {
+ int n_tokens_per_image = clip_n_patches(ctx->ctx_clip);
+ ok = clip_image_encode(
+ ctx->ctx_clip,
+ ctx->n_threads,
+ entries[i].get(),
+ ctx->image_embd_v.data() + i*n_mmproj_embd*n_tokens_per_image);
+ if (!ok) {
+ break; // don't let a later success mask an earlier failure
+ }
+ }
+ } else {
+ ok = clip_image_batch_encode(
+ ctx->ctx_clip,
+ ctx->n_threads,
+ &image_tokens->batch_f32,
+ ctx->image_embd_v.data());
+ }
+
return ok ? 0 : 1;
}
int32_t ret;
llama_pos n_past = pos0;
llama_batch text_batch = llama_batch_init(n_batch, 0, 1);
+ int n_mmproj_embd = clip_n_mmproj_embd(ctx->ctx_clip);
for (auto & chunk : chunks) {
bool is_last = &chunk == &chunks.back();
if (chunk.type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
- // TODO @ngxson : may need to split into smaller batches
- text_batch.n_tokens = chunk.tokens_text.size();
- for (size_t i = 0; i < chunk.tokens_text.size(); i++) {
- text_batch.token [i] = chunk.tokens_text[i];
- text_batch.pos [i] = n_past++;
- text_batch.n_seq_id[i] = 1;
- text_batch.seq_id [i][0] = seq_id;
- text_batch.logits [i] = false;
- }
- if (is_last) {
- // always get logits for last input chunk
- text_batch.logits[text_batch.n_tokens - 1] = true;
- }
- ret = llama_decode(lctx, text_batch);
- if (ret != 0) {
- LOG_ERR("failed to decode text\n");
- llama_batch_free(text_batch);
- return ret;
+ size_t i = 0;
+ while (i < chunk.tokens_text.size()) { // split into batches of at most n_batch tokens
+ int32_t n_tokens_batch = std::min((int32_t) (chunk.tokens_text.size() - i), n_batch);
+ text_batch.n_tokens = n_tokens_batch;
+ for (int32_t j = 0; j < n_tokens_batch; j++, i++) {
+ text_batch.token [j] = chunk.tokens_text[i];
+ text_batch.pos [j] = n_past++;
+ text_batch.n_seq_id[j] = 1;
+ text_batch.seq_id [j][0] = seq_id;
+ text_batch.logits [j] = false;
+ }
+ if (is_last && i == chunk.tokens_text.size()) {
+ // always get logits for the last token of the last input chunk
+ text_batch.logits[n_tokens_batch - 1] = true;
+ }
+ ret = llama_decode(lctx, text_batch);
+ if (ret != 0) {
+ LOG_ERR("failed to decode text\n");
+ llama_batch_free(text_batch);
+ return ret;
+ }
}
} else if (chunk.type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
GGML_ASSERT(chunk.tokens_image != nullptr);
int64_t t0 = ggml_time_ms();
if (ctx->print_timings) {
- LOG_INF("encoding image...\n");
+ LOG_INF("encoding image or slice...\n");
}
ret = mtmd_encode(ctx, chunk.tokens_image.get());
if (ret != 0) {
return ret;
}
if (ctx->print_timings) {
- LOG_INF("image encoded in %" PRId64 " ms\n", ggml_time_ms() - t0);
+ LOG_INF("image/slice encoded in %" PRId64 " ms\n", ggml_time_ms() - t0);
}
int32_t n_tokens = mtmd_image_tokens_get_n_tokens(chunk.tokens_image.get());
+ int32_t i_batch = 0;
+ int32_t n_img_batches = GGML_PAD(n_tokens, n_batch) / n_batch;
float * embd = mtmd_get_output_embd(ctx);
- decode_embd_batch batch_img(embd, n_tokens, n_past, 0);
- int64_t t1 = ggml_time_ms();
- ret = llama_decode(lctx, batch_img.batch);
- if (ret != 0) {
- LOG_ERR("failed to decode image\n");
- llama_batch_free(text_batch);
- return ret;
+
+ if (mtmd_decode_use_non_causal(ctx)) {
+ llama_set_causal_attn(lctx, false);
+ // TODO @ngxson : need to make sure only one image is processed at a time, and n_ubatch must be enough to hold the image
}
- if (ctx->print_timings) {
- LOG_INF("image decoded in %" PRId64 " ms\n", ggml_time_ms() - t1);
+
+ while (i_batch < n_img_batches) { // split into batches
+ int32_t pos_offset = i_batch*n_batch;
+ int32_t n_tokens_batch = std::min(n_batch, n_tokens - pos_offset);
+ float * embd_batch = embd + pos_offset*n_mmproj_embd;
+ decode_embd_batch batch_img(embd_batch, n_tokens_batch, n_past, 0);
+
+ LOG_INF("decoding image batch %d/%d, n_tokens_batch = %d\n", i_batch+1, n_img_batches, n_tokens_batch);
+
+ int64_t t1 = ggml_time_ms();
+ ret = llama_decode(lctx, batch_img.batch);
+ if (ret != 0) {
+ LOG_ERR("failed to decode image\n");
+ llama_set_causal_attn(lctx, true); // restore causal attn
+ llama_batch_free(text_batch);
+ return ret;
+ }
+
+ if (ctx->print_timings) {
+ LOG_INF("image decoded (batch %d/%d) in %" PRId64 " ms\n", i_batch+1, n_img_batches, ggml_time_ms() - t1);
+ }
+
+ i_batch++;
+ n_past += n_tokens_batch;
}
- n_past += n_tokens;
+ if (mtmd_decode_use_non_causal(ctx)) {
+ llama_set_causal_attn(lctx, true);
+ }
} else {
GGML_ASSERT(false && "chunk type not supported");
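
A note on the image batching above: `GGML_PAD(n_tokens, n_batch) / n_batch` is just a ceiling division, and each sub-batch reads the embedding buffer at offset `pos_offset * n_mmproj_embd`. A standalone sketch of the split with hypothetical sizes (the values are illustrative, not from the patch):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    const int32_t n_tokens = 700; // hypothetical: embedding count for one image
    const int32_t n_batch  = 512; // hypothetical: max tokens per llama_decode call

    // equivalent to GGML_PAD(n_tokens, n_batch) / n_batch
    const int32_t n_img_batches = (n_tokens + n_batch - 1) / n_batch;

    for (int32_t i_batch = 0; i_batch < n_img_batches; i_batch++) {
        const int32_t pos_offset     = i_batch * n_batch;
        const int32_t n_tokens_batch = std::min(n_batch, n_tokens - pos_offset);
        std::printf("batch %d/%d: token offset %d, %d tokens\n",
                    i_batch + 1, n_img_batches, pos_offset, n_tokens_batch);
    }
    // prints:
    //   batch 1/2: token offset 0, 512 tokens
    //   batch 2/2: token offset 512, 188 tokens
    return 0;
}
```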
arr_bin=()
arr_hf=()
+arr_tmpl=() # chat template
add_test() {
local bin=$1
local hf=$2
+ local tmpl=${3:-""} # default to empty string if not provided
arr_bin+=("$bin")
arr_hf+=("$hf")
+ arr_tmpl+=("$tmpl")
}
-add_test "llama-gemma3-cli" "ggml-org/gemma-3-4b-it-GGUF:Q4_K_M"
-add_test "llama-llava-cli" "cmp-nct/Yi-VL-6B-GGUF:Q5_K"
-add_test "llama-llava-cli" "guinmoon/MobileVLM-3B-GGUF:Q4_K_M"
-add_test "llama-llava-cli" "THUDM/glm-edge-v-5b-gguf:Q4_K_M"
-add_test "llama-llava-cli" "second-state/Llava-v1.5-7B-GGUF:Q2_K"
-add_test "llama-llava-cli" "cjpais/llava-1.6-mistral-7b-gguf:Q3_K"
-add_test "llama-llava-cli" "ibm-research/granite-vision-3.2-2b-GGUF:Q4_K_M"
-add_test "llama-minicpmv-cli" "second-state/MiniCPM-Llama3-V-2_5-GGUF:Q2_K" # model from openbmb is corrupted
-add_test "llama-minicpmv-cli" "openbmb/MiniCPM-V-2_6-gguf:Q2_K"
-add_test "llama-minicpmv-cli" "openbmb/MiniCPM-o-2_6-gguf:Q4_0"
+add_test "llama-mtmd-cli" "ggml-org/gemma-3-4b-it-GGUF:Q4_K_M"
+add_test "llama-mtmd-cli" "guinmoon/MobileVLM-3B-GGUF:Q4_K_M" "deepseek"
+add_test "llama-mtmd-cli" "THUDM/glm-edge-v-5b-gguf:Q4_K_M"
+add_test "llama-mtmd-cli" "second-state/Llava-v1.5-7B-GGUF:Q2_K" "vicuna"
+add_test "llama-mtmd-cli" "cjpais/llava-1.6-mistral-7b-gguf:Q3_K" "vicuna"
+add_test "llama-mtmd-cli" "ibm-research/granite-vision-3.2-2b-GGUF:Q4_K_M"
+add_test "llama-mtmd-cli" "second-state/MiniCPM-Llama3-V-2_5-GGUF:Q2_K" # model from openbmb is corrupted
+add_test "llama-mtmd-cli" "openbmb/MiniCPM-V-2_6-gguf:Q2_K"
+add_test "llama-mtmd-cli" "openbmb/MiniCPM-o-2_6-gguf:Q4_0"
add_test "llama-qwen2vl-cli" "bartowski/Qwen2-VL-2B-Instruct-GGUF:Q4_K_M"
+# add_test "llama-mtmd-cli" "cmp-nct/Yi-VL-6B-GGUF:Q5_K" # this model has broken chat template, not usable
+
###############
cmake --build build -j --target "${arr_bin[@]}"
for i in "${!arr_bin[@]}"; do
bin="${arr_bin[$i]}"
hf="${arr_hf[$i]}"
+ tmpl="${arr_tmpl[$i]}"
echo "Running test with binary: $bin and HF model: $hf"
echo ""
echo ""
- output=$("$PROJ_ROOT/build/bin/$bin" -hf "$hf" --image $SCRIPT_DIR/test-1.jpeg -p "what is the publisher name of the newspaper?" --temp 0 2>&1 | tee /dev/tty)
+ output=$(\
+ "$PROJ_ROOT/build/bin/$bin" \
+ -hf "$hf" \
+ --image $SCRIPT_DIR/test-1.jpeg \
+ -p "what is the publisher name of the newspaper?" \
+ --temp 0 -n 128 \
+ ${tmpl:+--chat-template "$tmpl"} \
+ 2>&1 | tee /dev/tty)
echo "$output" > $SCRIPT_DIR/output/$bin-$(echo "$hf" | tr '/' '-').log
return LLM_CHAT_TEMPLATE_PHI_3;
} else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE;
+ } else if (tmpl_contains("<|{{ item['role'] }}|>") && tmpl_contains("<|begin_of_image|>")) {
+ return LLM_CHAT_TEMPLATE_GLMEDGE;
} else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) {
return LLM_CHAT_TEMPLATE_ZEPHYR;
} else if (tmpl_contains("bos_token + message['role']")) {