readme : model : mtmd : lfm2 improvements (#15476)
author    Tarek Dakhran <redacted>
Fri, 22 Aug 2025 07:29:08 +0000 (09:29 +0200)
committer GitHub <redacted>
Fri, 22 Aug 2025 07:29:08 +0000 (09:29 +0200)
* Support untied embeddings

* Increase number of image tokens to 1024

* Add LFM2-VL to readme

* Actually use untied embeddings

README.md
gguf-py/gguf/constants.py
src/llama-arch.cpp
src/llama-model.cpp
tools/mtmd/clip.cpp

index 844675638427beb5991c0261ad857d9094c47ae6..a01ef6d503e400d6894c0cfe561faa5a36954110 100644
--- a/README.md
+++ b/README.md
@@ -151,6 +151,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo
 - [x] [Bunny](https://github.com/BAAI-DCAI/Bunny)
 - [x] [GLM-EDGE](https://huggingface.co/models?search=glm-edge)
 - [x] [Qwen2-VL](https://huggingface.co/collections/Qwen/qwen2-vl-66cee7455501d7126940800d)
+- [x] [LFM2-VL](https://huggingface.co/collections/LiquidAI/lfm2-vl-68963bbc84a610f7638d5ffa)
 
 </details>
 
index 41804f3a2bb1a3fc9b86c27d18dc6de69b0b39d0..61ebe6e5e77503968994d1f60b1c44f30ccc32e8 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -2590,6 +2590,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.ATTN_K,
         MODEL_TENSOR.ATTN_V,
         MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.OUTPUT,
     ],
     MODEL_ARCH.SMALLTHINKER: [
         MODEL_TENSOR.TOKEN_EMBD,
index 18dcc6ddfe56714ba4b2223c0681961447d365e5..c759a9c6d9e05756ba3f9ae756d08f36ea1a6dc2 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -2010,6 +2010,7 @@ static const std::map<llm_arch, std::map<llm_tensor, const char *>> LLM_TENSOR_N
             { LLM_TENSOR_SHORTCONV_OUTPROJ, "blk.%d.shortconv.out_proj" },
             { LLM_TENSOR_TOKEN_EMBD,        "token_embd" },
             { LLM_TENSOR_TOKEN_EMBD_NORM,   "token_embd_norm" },
+            { LLM_TENSOR_OUTPUT,            "output" },
         }
     },
     {
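
Aside: the entries in this table are printf-style name patterns. The loader fills in the block index for per-layer tensors and appends a ".weight"/".bias" suffix, so the new row resolves to "output.weight" in the GGUF. A standalone sketch of that expansion (illustrative only, not the loader's actual helper):

    #include <cstdio>

    int main() {
        // Per-layer patterns such as "blk.%d.shortconv.out_proj" take a block
        // index; top-level names such as "output" do not. Either way a
        // ".weight" (or ".bias") suffix is appended.
        char name[64];
        snprintf(name, sizeof(name), "blk.%d.shortconv.out_proj.%s", 3, "weight");
        printf("%s\n", name);             // blk.3.shortconv.out_proj.weight
        printf("%s\n", "output.weight");  // what the new LLM_TENSOR_OUTPUT row maps to
        return 0;
    }
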
index c4f0b12f247eeb9f3ceef9e448bd831181f9b532..3c8440a8f653c9ed4781dd5e2e0b9692d8583df0 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -5474,8 +5474,13 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                 } break;
             case LLM_ARCH_LFM2:
                 {
-                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,      "weight"), {n_embd, n_vocab}, 0);
                     tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
+                    output   = create_tensor(tn(LLM_TENSOR_OUTPUT,          "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
 
                     for (int i = 0; i < n_layer; ++i) {
                         auto & layer = layers[i];
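
Note: this is the usual optional-lm_head idiom in llama.cpp. "output.weight" is requested with TENSOR_NOT_REQUIRED, and when a tied checkpoint omits it, "token_embd.weight" is registered a second time with TENSOR_DUPLICATED, so model.output is always valid downstream. A standalone sketch of the same decision (hypothetical tensor table, not the loader's API):

    #include <cstdio>
    #include <set>
    #include <string>

    int main() {
        // Stand-in for a parsed GGUF; a tied checkpoint lacks "output.weight".
        std::set<std::string> tensors = { "token_embd.weight" };

        // Prefer a dedicated lm_head (untied); otherwise reuse the token
        // embedding matrix (tied), mirroring the fallback above.
        const char * lm_head = tensors.count("output.weight")
            ? "output.weight"
            : "token_embd.weight";
        printf("lm_head uses: %s\n", lm_head);  // -> token_embd.weight
        return 0;
    }
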
@@ -17787,8 +17792,7 @@ struct llm_build_lfm2 : public llm_graph_context {
         cb(cur, "model.embedding_norm", -1);
         res->t_embd = cur;
 
-        // lm_head is tied with embeddings
-        cur = build_lora_mm(model.tok_embd, cur);
+        cur = build_lora_mm(model.output, cur);
         cb(cur, "lm_head", -1);
 
         res->t_logits = cur;
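
Since load_tensors now guarantees model.output is non-null for LFM2 (its own data when untied, a duplicated view of token_embd when tied), the tied-weights special case can go away here. Shape-wise nothing changes; roughly annotated (dimension names assumed from the surrounding code):

    // cur          : [n_embd,  n_tokens]  hidden states after embedding_norm
    // model.output : [n_embd,  n_vocab]   lm_head weight (own data or duplicated)
    // build_lora_mm wraps ggml_mul_mat (plus any LoRA adapters) and contracts
    // the shared n_embd dimension:
    cur = build_lora_mm(model.output, cur);
    // cur          : [n_vocab, n_tokens]  logits
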
index 1676c328364f552bbf2f3ea33b1c15a56102830e..b3628db64f886d4cd05ee3aadf8d3cd059de4a32 100644
--- a/tools/mtmd/clip.cpp
+++ b/tools/mtmd/clip.cpp
@@ -3513,7 +3513,7 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, str
         const int height = img->ny;
         const int total_factor = params.patch_size * params.proj_scale_factor;
         constexpr int min_image_tokens = 64;
-        constexpr int max_image_tokens = 256;
+        constexpr int max_image_tokens = 1024;
         const float min_pixels = min_image_tokens * total_factor * total_factor;
         const float max_pixels = max_image_tokens * total_factor * total_factor;
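
Worked through with assumed LFM2-VL values (patch_size = 16, proj_scale_factor = 2; the real numbers come from the model's hparams): total_factor = 32, so the accepted image area grows from 256 * 32^2 = 262,144 px to 1024 * 32^2 = 1,048,576 px, roughly one megapixel, before the preprocessor scales the input down.

    #include <cstdio>

    int main() {
        // Assumed values for illustration; clip.cpp reads them from the GGUF.
        const int patch_size        = 16;
        const int proj_scale_factor = 2;
        const int total_factor      = patch_size * proj_scale_factor;  // 32

        // One image token covers a total_factor x total_factor pixel tile,
        // so the pixel budget scales linearly with the token cap.
        printf("old max pixels: %d\n",  256 * total_factor * total_factor);
        printf("new max pixels: %d\n", 1024 * total_factor * total_factor);
        return 0;
    }
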