clip : revert the change of BOI/EOI token for GLM-edge (⚠️ breaking change) (#13259)
author     Xuan-Son Nguyen <redacted>
Sat, 3 May 2025 18:07:54 +0000 (20:07 +0200)
committer  GitHub <redacted>
Sat, 3 May 2025 18:07:54 +0000 (20:07 +0200)
tools/llava/clip-impl.h
tools/llava/clip.cpp
tools/llava/mtmd.cpp

diff --git a/tools/llava/clip-impl.h b/tools/llava/clip-impl.h
index b575ca4d7c2a9a9b8ae16b183aa2d419bf886dc6..b78d930bce34cde8787043f03be8b326f22b3fea 100644 (file)
@@ -75,6 +75,8 @@
 #define TN_MM_PROJECTOR    "mm.model.fc.weight"         // idefics3
 #define TN_MM_PATCH_MERGER "mm.patch_merger.weight"     // mistral small 3.1
 #define TN_TOK_IMG_BREAK   "v.token_embd.img_break"     // pixtral
+#define TN_TOK_GLM_BOI     "adapter.boi"                // glm-edge (these embeddings are not in text model)
+#define TN_TOK_GLM_EOI     "adapter.eoi"                // glm-edge (these embeddings are not in text model)
 
 // mimicpmv
 #define TN_MINICPMV_POS_EMBD_K "resampler.pos_embed_k"
diff --git a/tools/llava/clip.cpp b/tools/llava/clip.cpp
index 7607d4e3ae3a4747de1aeb7b73cf80b18f0a2775..3b60a526eedd80b5ad8aeda53f2492e6bbe164bb 100644 (file)
@@ -249,9 +249,11 @@ struct clip_vision_model {
     struct ggml_tensor * mm_4_w = nullptr;
     struct ggml_tensor * mm_4_b = nullptr;
 
-    //GLMV-Edge projection
+    // GLMV-Edge projection
     struct ggml_tensor * mm_model_adapter_conv_w = nullptr;
     struct ggml_tensor * mm_model_adapter_conv_b = nullptr;
+    struct ggml_tensor * mm_glm_tok_boi = nullptr;
+    struct ggml_tensor * mm_glm_tok_eoi = nullptr;
 
     // MobileVLM projection
     struct ggml_tensor * mm_model_mlp_1_w = nullptr;
@@ -1559,6 +1561,13 @@ static ggml_cgraph * clip_image_build_graph_legacy(clip_ctx * ctx, const clip_im
             embeddings = ggml_mul(ctx0, embeddings,x);
             embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
         }
+        // arrangement of BOI/EOI token embeddings
+        // note: these embeddings are not present in text model, hence we cannot process them as text tokens
+        // see: https://huggingface.co/THUDM/glm-edge-v-2b/blob/main/siglip.py#L53
+        {
+            embeddings = ggml_concat(ctx0, model.mm_glm_tok_boi, embeddings, 1); // BOI
+            embeddings = ggml_concat(ctx0, embeddings, model.mm_glm_tok_eoi, 1); // EOI
+        }
     }
 
     else if (ctx->proj_type == PROJECTOR_TYPE_QWEN2VL) {
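
For context, a minimal sketch (not from the patch) of what the two concats do to the embedding tensor, assuming the adapter BOI/EOI tensors hold one embedding each and the usual ggml layout where dim 1 is the token dimension:

// assumed shapes going into the block above:
//   model.mm_glm_tok_boi : [n_embd, 1]
//   embeddings           : [n_embd, n_image_tokens]
//   model.mm_glm_tok_eoi : [n_embd, 1]
// ggml_concat(ctx0, model.mm_glm_tok_boi, embeddings, 1)  -> [n_embd, n_image_tokens + 1]
// ggml_concat(ctx0, embeddings, model.mm_glm_tok_eoi, 1)  -> [n_embd, n_image_tokens + 2]
// i.e. the image embeddings end up framed in token order as: BOI, image..., EOI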
@@ -1972,12 +1981,14 @@ struct clip_model_loader {
                 {
                     vision_model.mm_model_adapter_conv_w = get_tensor(string_format(TN_GLM_ADAPER_CONV, "weight"));
                     vision_model.mm_model_adapter_conv_b = get_tensor(string_format(TN_GLM_ADAPER_CONV, "bias"));
-                    vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR,"weight"));
-                    vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,"weight"));
-                    vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1,"bias"));
-                    vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H,"weight"));
-                    vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE,"weight"));
-                    vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H,"weight"));
+                    vision_model.mm_model_mlp_0_w = get_tensor(string_format(TN_GLM_ADAPTER_LINEAR, "weight"));
+                    vision_model.mm_model_ln_q_w = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "weight"));
+                    vision_model.mm_model_ln_q_b = get_tensor(string_format(TN_GLM_ADAPTER_NORM_1, "bias"));
+                    vision_model.mm_model_mlp_1_w = get_tensor(string_format(TN_GLM_ADAPTER_D_H_2_4H, "weight"));
+                    vision_model.mm_model_mlp_2_w = get_tensor(string_format(TN_GLM_ADAPTER_GATE, "weight"));
+                    vision_model.mm_model_mlp_3_w = get_tensor(string_format(TN_GLM_ADAPTER_D_4H_2_H, "weight"));
+                    vision_model.mm_glm_tok_boi = get_tensor(string_format(TN_TOK_GLM_BOI, "weight"));
+                    vision_model.mm_glm_tok_eoi = get_tensor(string_format(TN_TOK_GLM_EOI, "weight"));
                 } break;
             case PROJECTOR_TYPE_QWEN2VL:
             case PROJECTOR_TYPE_QWEN25VL:
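
This is where the breaking change flagged in the title comes from: the two new lookups are unconditional, so a GLM-Edge mmproj GGUF converted before this change presumably fails to load and needs to be re-converted. A minimal standalone sketch (hypothetical file name; the tensor names assume the TN_TOK_GLM_* defines resolve as-is to "adapter.boi" / "adapter.eoi", since they contain no format specifier) for checking whether an existing file already carries the embeddings:

#include <cstdio>
#include "gguf.h"

int main() {
    struct gguf_init_params params = { /*no_alloc =*/ true, /*ctx =*/ nullptr };
    // hypothetical path to an existing GLM-Edge mmproj file
    struct gguf_context * ctx = gguf_init_from_file("mmproj-glm-edge.gguf", params);
    if (!ctx) {
        return 1;
    }
    // gguf_find_tensor returns a negative index when the tensor is absent
    const bool has_boi_eoi = gguf_find_tensor(ctx, "adapter.boi") >= 0 &&
                             gguf_find_tensor(ctx, "adapter.eoi") >= 0;
    printf("BOI/EOI embeddings present: %s\n", has_boi_eoi ? "yes" : "no");
    gguf_free(ctx);
    return has_boi_eoi ? 0 : 1;
}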
@@ -2948,6 +2959,7 @@ int clip_n_output_tokens(const struct clip_ctx * ctx, struct clip_image_f32 * im
 
     if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2 || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
         n_patches /= 4;
+        n_patches += 2; // for BOI and EOI token embeddings
     } else if (ctx->proj_type == PROJECTOR_TYPE_MINICPMV) {
         if (ctx->minicpmv_version == 2) {
             n_patches = 96;
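
An illustrative count for the GLM-Edge path under assumed geometry (a 672 x 672 input with 14 x 14 patches; the actual numbers are not stated in this patch):

// hypothetical example for PROJECTOR_TYPE_GLM_EDGE:
//   n_patches  = (672 / 14) * (672 / 14) = 48 * 48 = 2304
//   n_patches /= 4   ->  576    // 2x2 merge in the adapter
//   n_patches += 2   ->  578    // plus the BOI and EOI embeddings added in the graph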
diff --git a/tools/llava/mtmd.cpp b/tools/llava/mtmd.cpp
index d1d7530feb6259510f5ea048f822c2c2aa6b17a1..73abf2ad18e556226a24c0c283ab73f8b32d0512 100644 (file)
@@ -189,11 +189,6 @@ int32_t mtmd_tokenize(mtmd_context * ctx,
         marker_modified = "<start_of_image>" + ctx->image_marker + "<end_of_image>";
         string_replace_all(prompt_modified, ctx->image_marker, marker_modified);
 
-    } else if (proj_type == PROJECTOR_TYPE_GLM_EDGE) {
-        // <|begin_of_image|> ... (image embeddings) ... <|end_of_image|>
-        marker_modified = "<|begin_of_image|>" + ctx->image_marker + "<|end_of_image|>";
-        string_replace_all(prompt_modified, ctx->image_marker, marker_modified);
-
     } else if (proj_type == PROJECTOR_TYPE_IDEFICS3) {
         // https://github.com/huggingface/transformers/blob/a42ba80fa520c784c8f11a973ca9034e5f859b79/src/transformers/models/idefics3/processing_idefics3.py#L192-L215
         marker_modified = "<fake_token_around_image><global-img>" + ctx->image_marker + "<fake_token_around_image>";
@@ -213,6 +208,7 @@ int32_t mtmd_tokenize(mtmd_context * ctx,
     }
 
     // llava-1.5, llava-1.6, Yi-VL, Yi-34B, granite: don't need to add prefix and suffix
+    // for glm-edge, BOI and EOI token's embeddings are not present in the text model
 
     std::vector<std::string> parts = string_split_str(prompt_modified, ctx->image_marker);
     output.clear();
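
For reference, a small sketch of the split performed on the line above, assuming a prompt with a single image marker (string_split_str keeps the text on both sides of the marker):

// prompt_modified = "USER: <__image__> what is this?"
// parts           = { "USER: ", " what is this?" }
// output is then presumably rebuilt by interleaving the tokenized text parts with the image chunk(s)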