clip_flash_attn_type flash_attn_type = CLIP_FLASH_ATTN_TYPE_AUTO;
bool is_allocated = false;
+ bool debug_output_embeddings = false;
+
clip_ctx(clip_context_params & ctx_params) {
flash_attn_type = ctx_params.flash_attn_type;
backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
if (ctx_params.cb_eval != nullptr) {
ggml_backend_sched_set_eval_callback(sched.get(), ctx_params.cb_eval, ctx_params.cb_eval_user_data);
}
+
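+ // allow forcing the embeddings dump via the MTMD_DEBUG_EMBEDDINGS environment variable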
+ debug_output_embeddings = std::getenv("MTMD_DEBUG_EMBEDDINGS") != nullptr;
}
~clip_ctx() {
// TODO: we don't support audio for Gemma 3N, but GGUF contains audio tensors
// we can remove this check when we implement audio support for Gemma 3N
skip_audio = ctx_vision->model.proj_type == PROJECTOR_TYPE_GEMMA3NV;
-
- // clip_debug_encode(ctx_vision, 24*14, 24*14, 0.5f);
}
if (loader.has_audio && !skip_audio) {
}
- // Debug: dump final embeddings if MTMD_DEBUG_EMBEDDINGS is set
- if (std::getenv("MTMD_DEBUG_EMBEDDINGS") != nullptr) {
+ // Debug: dump final embeddings if debug_output_embeddings is enabled (MTMD_DEBUG_EMBEDDINGS env var or clip_set_debug_output_embeddings)
+ if (ctx->debug_output_embeddings) {
const int64_t n_embd = embeddings->ne[0];
const int64_t n_tokens = embeddings->ne[1];
std::vector<float> emb_data(n_embd * n_tokens);
//
// API for debugging
//
-void clip_debug_encode(clip_ctx * ctx, int h, int w, float fill_value) {
- clip_image_f32 img;
- img.nx = w;
- img.ny = h;
- img.buf.resize(h * w * 3);
- for (int i = 0; i < h * w * 3; i++) {
- img.buf[i] = static_cast<float>(fill_value);
- }
- clip_image_encode(ctx, 1, &img, nullptr);
- GGML_ASSERT(img.buf.empty() && "expected, always stop here");
+
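+// enable or disable dumping of the final output embeddings (see also the MTMD_DEBUG_EMBEDDINGS environment variable)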
+void clip_set_debug_output_embeddings(clip_ctx * ctx, bool enable) {
+ ctx->debug_output_embeddings = enable;
}
--- /dev/null
+#include "mtmd-debug.h"
+
+#include "arg.h"
+#include "debug.h"
+#include "log.h"
+#include "common.h"
+#include "llama.h"
+#include "ggml.h"
+#include "mtmd.h"
+#include "mtmd-helper.h"
+
+#include <vector>
+#include <cmath>
+#include <limits.h>
+#include <cinttypes>
+#include <clocale>
+
+// INTERNAL TOOL FOR DEBUGGING PURPOSES ONLY
+// NOT INTENDED FOR PUBLIC USE
+
+static void show_additional_info(int /*argc*/, char ** argv) {
+ LOG(
+ "Internal debugging tool for mtmd; See mtmd-debug.md for the pytorch equivalent code\n"
+ "Note: we repurpose some args from other examples, they will have different meaning here\n"
+ "\n"
+ "Usage: %s -m <model> --mmproj <mmproj> -p <mode> -n <size> --image <image> --audio <audio>\n"
+ "\n"
+ " -n <size>: number of pixels per edge for image (always square image), or number of samples for audio\n"
+ "\n"
+ " -p \"encode\" (debugging encode pass, default case):\n"
+ " --image can be:\n"
+ " \"white\", \"black\", \"gray\": filled 1.0f, 0.0f and 0.5f respectively\n"
+ " \"cb\": checkerboard pattern, alternate 1.0f and 0.0f\n"
+ " --audio can be:\n"
+ " \"one\", \"zero\", \"half\": filled 1.0f, 0.0f and 0.5f respectively\n"
+ " \"1010\": checkerboard pattern, alternate 1.0f and 0.0f\n"
+ "\n"
+ " -p \"preproc\" (debugging preprocessing pass):\n"
+ " --image can be:\n"
+ " \"white\", \"black\", \"gray\": filled image with respective colors\n"
+ " \"cb\": checkerboard pattern\n"
+ " --audio can be:\n"
+ " \"one\", \"zero\", \"half\": filled 1.0f, 0.0f and 0.5f respectively\n"
+ " \"440\": sine wave with 440 Hz frequency\n"
+ "\n",
+ argv[0]
+ );
+}
+
+int main(int argc, char ** argv) {
+ std::setlocale(LC_NUMERIC, "C");
+
+ ggml_time_init();
+
+ common_params params;
+
+ if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MTMD, show_additional_info)) {
+ return 1;
+ }
+
+ common_init();
+ mtmd_helper_log_set(common_log_default_callback, nullptr);
+
+ if (params.mmproj.path.empty()) {
+ show_additional_info(argc, argv);
+ LOG_ERR("ERR: Missing --mmproj argument\n");
+ return 1;
+ }
+
+ LOG_INF("%s: loading model: %s\n", __func__, params.model.path.c_str());
+
+ mtmd::context_ptr ctx_mtmd;
+ common_init_result_ptr llama_init;
+ base_callback_data cb_data;
+
+ llama_init = common_init_from_params(params);
+ {
+ auto * model = llama_init->model();
+ const char * clip_path = params.mmproj.path.c_str();
+ mtmd_context_params mparams = mtmd_context_params_default();
+ mparams.use_gpu = params.mmproj_use_gpu;
+ mparams.print_timings = true;
+ mparams.n_threads = params.cpuparams.n_threads;
+ mparams.flash_attn_type = params.flash_attn_type;
+ mparams.warmup = params.warmup;
+ mparams.image_min_tokens = params.image_min_tokens;
+ mparams.image_max_tokens = params.image_max_tokens;
+ {
+ // always enable debug callback
+ mparams.cb_eval_user_data = &cb_data;
+ mparams.cb_eval = common_debug_cb_eval<false>;
+ }
+ ctx_mtmd.reset(mtmd_init_from_file(clip_path, model, mparams));
+ if (!ctx_mtmd.get()) {
+ LOG_ERR("Failed to load vision model from %s\n", clip_path);
+ exit(1);
+ }
+ }
+
+ std::string input;
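+ // -n (n_predict) is repurposed here as the input size: pixels per edge for images, number of samples for audio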
+ int32_t inp_size = params.n_predict;
+ if (params.image.empty()) {
+ LOG_ERR("ERR: At least one of --image or --audio must be specified\n");
+ return 1;
+ }
+ if (inp_size <= 0) {
+ LOG_ERR("ERR: Invalid size specified with -n, must be greater than 0\n");
+ return 1;
+ }
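+ // only the first --image/--audio value is used; it selects the synthetic input pattern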
+ input = params.image[0];
+
+ if (params.prompt.empty() || params.prompt == "encode") {
+ std::vector<std::vector<float>> image;
+ std::vector<float> samples;
+
+ if (input == "black") {
+ for (int i = 0; i < inp_size; ++i) {
+ auto row = std::vector<float>(inp_size * 3, 0.0f);
+ image.push_back(row);
+ }
+ } else if (input == "white") {
+ for (int i = 0; i < inp_size; ++i) {
+ auto row = std::vector<float>(inp_size * 3, 1.0f);
+ image.push_back(row);
+ }
+ } else if (input == "gray") {
+ for (int i = 0; i < inp_size; ++i) {
+ auto row = std::vector<float>(inp_size * 3, 0.5f);
+ image.push_back(row);
+ }
+ } else if (input == "cb") {
+ for (int i = 0; i < inp_size; ++i) {
+ auto row = std::vector<float>(inp_size * 3, 0.0f);
+ image.push_back(row);
+ }
+ for (int y = 0; y < inp_size; ++y) {
+ for (int x = 0; x < inp_size; ++x) {
+ float v = ((x + y) % 2) ? 0.0f : 1.0f;
+ image[y][x * 3 + 0] = v;
+ image[y][x * 3 + 1] = v;
+ image[y][x * 3 + 2] = v;
+ }
+ }
+ } else if (input == "one") {
+ samples = std::vector<float>(inp_size, 1.0f);
+ } else if (input == "zero") {
+ samples = std::vector<float>(inp_size, 0.0f);
+ } else if (input == "half") {
+ samples = std::vector<float>(inp_size, 0.5f);
+ } else if (input == "1010") {
+ samples.resize(inp_size);
+ for (int i = 0; i < inp_size; ++i) {
+ samples[i] = (i % 2) ? 0.0f : 1.0f;
+ }
+ } else {
+ LOG_ERR("ERR: Invalid input specified with --image/--audio\n");
+ show_additional_info(argc, argv);
+ return 1;
+ }
+
+ // run encode pass
+ LOG_INF("Running encode pass for input type: %s\n", input.c_str());
+ if (!samples.empty()) {
+ LOG_INF("Input audio with %zu samples, type: %s\n", samples.size(), input.c_str());
+ mtmd_debug_encode_audio(ctx_mtmd.get(), samples);
+ } else {
+ LOG_INF("Input image with dimensions %d x %d, type: %s\n", inp_size, inp_size, input.c_str());
+ mtmd_debug_encode_image(ctx_mtmd.get(), image);
+ }
+
+ } else if (params.prompt == "preproc") {
+ std::vector<uint8_t> rgb_values;
+ std::vector<float> pcm_samples;
+
+ if (input == "black") {
+ rgb_values = std::vector<uint8_t>(inp_size * inp_size * 3, 0);
+ } else if (input == "white") {
+ rgb_values = std::vector<uint8_t>(inp_size * inp_size * 3, 255);
+ } else if (input == "gray") {
+ rgb_values = std::vector<uint8_t>(inp_size * inp_size * 3, 128);
+ } else if (input == "cb") {
+ rgb_values.resize(inp_size * inp_size * 3);
+ for (int y = 0; y < inp_size; ++y) {
+ for (int x = 0; x < inp_size; ++x) {
+ uint8_t v = ((x + y) % 2) ? 0 : 255;
+ rgb_values[(y * inp_size + x) * 3 + 0] = v;
+ rgb_values[(y * inp_size + x) * 3 + 1] = v;
+ rgb_values[(y * inp_size + x) * 3 + 2] = v;
+ }
+ }
+ } else if (input == "one") {
+ pcm_samples = std::vector<float>(inp_size, 1.0f);
+ } else if (input == "zero") {
+ pcm_samples = std::vector<float>(inp_size, 0.0f);
+ } else if (input == "half") {
+ pcm_samples = std::vector<float>(inp_size, 0.5f);
+ } else if (input == "440") {
+ pcm_samples.resize(inp_size);
+ float freq = 440.0f;
+ float sample_rate = mtmd_get_audio_sample_rate(ctx_mtmd.get());
+ float pi = 3.14159265f;
+ for (int i = 0; i < inp_size; ++i) {
+ pcm_samples[i] = sinf(2 * pi * freq * i / sample_rate);
+ }
+ } else {
+ LOG_ERR("ERR: Invalid input specified with --image/--audio\n");
+ show_additional_info(argc, argv);
+ return 1;
+ }
+
+ // run preprocessing pass
+ LOG_INF("Running preprocessing pass for input type: %s\n", input.c_str());
+ if (!pcm_samples.empty()) {
+ LOG_INF("Input audio with %zu samples, type: %s\n", pcm_samples.size(), input.c_str());
+ mtmd_debug_preprocess_audio(ctx_mtmd.get(), pcm_samples);
+ } else {
+ LOG_INF("Input image with dimensions %d x %d, type: %s\n", inp_size, inp_size, input.c_str());
+ mtmd_debug_preprocess_image(ctx_mtmd.get(), rgb_values, inp_size, inp_size);
+ }
+
+ } else {
+ LOG_ERR("ERR: Invalid mode specified with -p\n");
+ show_additional_info(argc, argv);
+ return 1;
+ }
+
+ return 0;
+}
+
#include "clip-impl.h"
#include "mtmd.h"
#include "mtmd-audio.h"
+#include "debug/mtmd-debug.h"
#include "llama.h"
g_logger_state.log_callback = log_callback ? log_callback : clip_log_callback_default;
g_logger_state.log_callback_user_data = user_data;
}
+
+//
+// Debugging API (NOT intended for public use)
+//
+
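+// common path: enable the embeddings dump and run the synthetic input through clip_image_encode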
+static void mtmd_debug_encode_impl(mtmd_context * ctx, clip_ctx * ctx_clip, clip_image_f32 & image) {
+ clip_set_debug_output_embeddings(ctx_clip, true);
+ int n_mmproj_embd = clip_n_mmproj_embd(ctx_clip);
+ int n_tokens = clip_n_output_tokens(ctx_clip, &image);
+ std::vector<float> embd_output(n_tokens * n_mmproj_embd, 0.0f);
+ bool ok = clip_image_encode(
+ ctx_clip,
+ ctx->n_threads,
+ &image,
+ embd_output.data());
+ if (!ok) {
+ LOG_ERR("%s: failed to encode image\n", __func__);
+ }
+}
+
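+// encode a synthetic square image; each entry of the input vector is one row of nx * 3 interleaved RGB float values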
+void mtmd_debug_encode_image(mtmd_context * ctx, const std::vector<std::vector<float>> & image) {
+ if (!ctx->ctx_v) {
+ LOG_ERR("%s: model does not support vision input\n", __func__);
+ return;
+ }
+ clip_image_f32 inp_image;
+ inp_image.nx = (int) image.size();
+ inp_image.ny = inp_image.nx;
+ inp_image.buf.reserve(inp_image.nx * inp_image.ny * 3); // 3 channels per pixel
+ for (const auto & row : image) {
+ inp_image.buf.insert(inp_image.buf.end(), row.begin(), row.end());
+ }
+ LOG_INF("%s: created input image with nx=%d, ny=%d\n", __func__, inp_image.nx, inp_image.ny);
+ mtmd_debug_encode_impl(ctx, ctx->ctx_v, inp_image);
+}
+
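+// encode a synthetic audio input; each input sample is replicated across all mel bins (constant column in the mel spectrogram)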
+void mtmd_debug_encode_audio(mtmd_context * ctx, const std::vector<float> & input) {
+ if (!ctx->ctx_a) {
+ LOG_ERR("%s: model does not support audio input\n", __func__);
+ return;
+ }
+ int n_mel = clip_get_hparams(ctx->ctx_a)->n_mel_bins;
+ clip_image_f32 inp_audio;
+ inp_audio.nx = (int) input.size();
+ inp_audio.ny = n_mel;
+ inp_audio.buf.resize(input.size() * n_mel);
+ for (size_t i = 0; i < input.size(); i++) {
+ for (int j = 0; j < n_mel; j++) {
+ inp_audio.buf[j * inp_audio.nx + i] = input[i];
+ }
+ }
+ LOG_INF("%s: created input audio with nx=%d, ny=%d\n", __func__, inp_audio.nx, inp_audio.ny);
+ mtmd_debug_encode_impl(ctx, ctx->ctx_a, inp_audio);
+}
+
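+// run a raw RGB image through the clip image preprocessing pipeline and log the resulting f32 batch entries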
+void mtmd_debug_preprocess_image(mtmd_context * ctx, const std::vector<uint8_t> & rgb_values, int nx, int ny) {
+ if (!ctx->ctx_v) {
+ LOG_ERR("%s: model does not support vision input\n", __func__);
+ return;
+ }
+ clip_image_u8 img_u8;
+ img_u8.nx = nx;
+ img_u8.ny = ny;
+ img_u8.buf = rgb_values;
+ clip_image_f32_batch batch_f32;
+ bool ok = clip_image_preprocess(ctx->ctx_v, &img_u8, &batch_f32);
+ if (!ok) {
+ LOG_ERR("%s: failed to preprocess image\n", __func__);
+ return;
+ }
+ LOG_INF("%s: preprocessed image to batch_f32 with %d entries\n", __func__, (int)batch_f32.entries.size());
+ for (size_t i = 0; i < batch_f32.entries.size(); i++) {
+ LOG_INF("%s: entry %zu has nx=%d, ny=%d\n", __func__, i, batch_f32.entries[i]->nx, batch_f32.entries[i]->ny);
+ // TODO: better way to dump entry content?
+ }
+}
+
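+// run raw PCM samples through the audio preprocessor and log the resulting mel spectrogram chunks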
+void mtmd_debug_preprocess_audio(mtmd_context * ctx, const std::vector<float> & samples) {
+ if (!ctx->ctx_a) {
+ LOG_ERR("%s: model does not support audio input\n", __func__);
+ return;
+ }
+ std::vector<mtmd_audio_mel> mel_spec_chunks;
+ bool ok = ctx->audio_preproc->preprocess(samples.data(), samples.size(), mel_spec_chunks);
+ if (!ok) {
+ LOG_ERR("%s: failed to preprocess audio\n", __func__);
+ return;
+ }
+ LOG_INF("%s: preprocessed audio to %zu mel spec chunks\n", __func__, mel_spec_chunks.size());
+ for (size_t i = 0; i < mel_spec_chunks.size(); i++) {
+ LOG_INF("%s: mel spec chunk %zu has n_len=%d, n_mel=%d\n", __func__, i, mel_spec_chunks[i].n_len, mel_spec_chunks[i].n_mel);
+
+ // dump mel entries: data is stored as [n_mel][n_len] (mel-major)
+ const auto & mel = mel_spec_chunks[i];
+ for (int m = 0; m < mel.n_mel; m++) {
+ for (int t = 0; t < mel.n_len; t++) {
+ LOG_INF("mel[%zu][m=%d][t=%d] = %f\n", i, m, t, mel.data[m * mel.n_len + t]);
+ }
+ }
+ }
+}