git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
model : mtmd : make input norm optional in LFM2-VL (#18594)
author: Tarek Dakhran <redacted>
Sun, 4 Jan 2026 17:50:02 +0000 (18:50 +0100)
committer: GitHub <redacted>
Sun, 4 Jan 2026 17:50:02 +0000 (18:50 +0100)
Upcoming LFM2-VL releases will have configurable input norm.
See https://github.com/huggingface/transformers/pull/43087 for details.

tools/mtmd/clip.cpp
tools/mtmd/models/siglip.cpp

index 9f551e8f3cd654a0a79d2646ac4879b3b649d4f4..9c9abd8d2e78eedd6e5ecf1238f29a44f0a234a8 100644 (file)
@@ -1552,6 +1552,14 @@ struct clip_model_loader {
                     model.projection = get_tensor(TN_MM_PROJECTOR);
                 } break;
             case PROJECTOR_TYPE_LFM2:
+                {
+                    model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM, false);
+                    model.mm_input_norm_b = get_tensor(TN_MM_INP_NORM_B, false);
+                    model.mm_1_w = get_tensor(string_format(TN_LLAVA_PROJ, 1, "weight"));
+                    model.mm_1_b = get_tensor(string_format(TN_LLAVA_PROJ, 1, "bias"));
+                    model.mm_2_w = get_tensor(string_format(TN_LLAVA_PROJ, 2, "weight"));
+                    model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
+                } break;
             case PROJECTOR_TYPE_KIMIVL:
                 {
                     model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM);
index ef094cfd0ebc5516f702493757073266043bd10f..b866a11c5aa4f61df7a04ac9f27d2adf5e5c8348 100644 (file)
@@ -50,10 +50,15 @@ ggml_cgraph * clip_graph_siglip::build() {
         const int scale_factor = model.hparams.n_merge;
         cur = build_patch_merge_permute(cur, scale_factor);
 
-        // projection
-        cur = ggml_norm(ctx0, cur, 1e-5); // default nn.LayerNorm
-        cur = ggml_mul(ctx0, cur, model.mm_input_norm_w);
-        cur = ggml_add(ctx0, cur, model.mm_input_norm_b);
+        // projection, in LFM2-VL input norm is optional
+        if (model.mm_input_norm_w) {
+            cur = ggml_norm(ctx0, cur, 1e-5); // default nn.LayerNorm
+            cur = ggml_mul(ctx0, cur, model.mm_input_norm_w);
+        }
+
+        if (model.mm_input_norm_b) {
+            cur = ggml_add(ctx0, cur, model.mm_input_norm_b);
+        }
 
         cur = build_ffn(cur,
             model.mm_1_w, model.mm_1_b,