def set_vocab(self):
self._set_vocab_sentencepiece()
+ tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
+ if tokenizer_config_file.is_file():
+ with open(tokenizer_config_file, "r", encoding="utf-8") as f:
+ tokenizer_config_json = json.load(f)
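+ # propagate add_prefix_space so llama.cpp tokenization matches the HF tokenizer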
+ if "add_prefix_space" in tokenizer_config_json:
+ self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"])
+
def set_gguf_parameters(self):
super().set_gguf_parameters()
if (head_dim := self.hparams.get("head_dim")) is None:
head_dim = self.hparams["hidden_size"] // num_heads
+ if "mlp_AR" in name or "vision_model" in name:
+ # skip vision model and projector tensors
+ return
+
if "ernie." in name:
name = name.replace("ernie.", "model.")
# split the qkv weights
raise ValueError(f"Unprocessed experts: {experts}")
+@ModelBase.register("PaddleOCRVLForConditionalGeneration")
+class PaddleOCRModel(Ernie4_5Model):
+ model_arch = gguf.MODEL_ARCH.PADDLEOCR
+
+
+@ModelBase.register("PaddleOCRVisionModel")
+class PaddleOCRVisionModel(MmprojModel):
+ # PaddleOCR-VL uses a modified version of SigLIP
+ min_pixels: int = 0
+ max_pixels: int = 0
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ assert self.hparams_vision is not None
+ self.min_pixels = self.preprocessor_config["min_pixels"]
+ self.max_pixels = self.preprocessor_config["max_pixels"]
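+ # the nominal image_size is the side length of the largest supported square image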
+ self.hparams_vision["image_size"] = int(math.sqrt(self.max_pixels))
+
+ def set_gguf_parameters(self):
+ super().set_gguf_parameters()
+ assert self.hparams_vision is not None
+ hparams = self.hparams_vision
+ self.gguf_writer.add_clip_projector_type(gguf.VisionProjectorType.PADDLEOCR)
+ self.gguf_writer.add_vision_max_pixels(self.max_pixels)
+ self.gguf_writer.add_vision_min_pixels(self.min_pixels)
+ self.gguf_writer.add_vision_use_gelu(True)
+ self.gguf_writer.add_vision_attention_layernorm_eps(hparams.get("rms_norm_eps", 1e-6))
+
+ def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
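+ # remap the checkpoint's "visual." prefix to "model." so the names match the vision tensor map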
+ name = name.replace("visual.", "model.")
+
+ if "vision_model" in name or "mlp_AR" in name:
+ if "packing_position_embedding" in name:
+ return # unused
+ elif "vision_model.head" in name:
+ # we don't yet support image embeddings for this model
+ return
+ else:
+ yield from super().modify_tensors(data_torch, name, bid)
+ return # skip other tensors
+
+
@ModelBase.register(
"Qwen2VLModel",
"Qwen2VLForConditionalGeneration",
RND1 = auto()
PANGU_EMBED = auto()
MISTRAL3 = auto()
+ PADDLEOCR = auto()
MIMO2 = auto()
STEP35 = auto()
LLAMA_EMBED = auto()
MODEL_ARCH.RND1: "rnd1",
MODEL_ARCH.PANGU_EMBED: "pangu-embedded",
MODEL_ARCH.MISTRAL3: "mistral3",
+ MODEL_ARCH.PADDLEOCR: "paddleocr",
MODEL_ARCH.MIMO2: "mimo2",
MODEL_ARCH.STEP35: "step35",
MODEL_ARCH.LLAMA_EMBED: "llama-embed",
MODEL_TENSOR.FFN_DOWN,
MODEL_TENSOR.FFN_UP,
],
+ MODEL_ARCH.PADDLEOCR: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.FFN_NORM,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
MODEL_ARCH.FALCON_H1: [
# Token embedding
MODEL_TENSOR.TOKEN_EMBD,
VOXTRAL = "voxtral"
LFM2 = "lfm2"
KIMIVL = "kimivl"
+ PADDLEOCR = "paddleocr"
KIMIK25 = "kimik25"
LIGHTONOCR = "lightonocr"
COGVLM = "cogvlm"
"multi_modal_projector.linear_{bid}",
"mm_projector.proj.linear_{bid}", # Kimi-K2.5
"visual.merger.mlp.{bid}", # qwen2vl
+ "mlp_AR.linear_{bid}", # PaddleOCR-VL
"merger.mlp.{bid}",
),
"mm_projector.pre_norm", # Kimi-K2.5
"pre_mm_projector_norm",
"model.vision.linear_proj.norm1", # cogvlm
+ "mlp_AR.pre_norm", # PaddleOCR-VL
"merger.ln_q",
),
MODEL_TENSOR.V_RESMPL_ATTN_OUT: (
"resampler.attn.out_proj",
+ "model.vision_model.head.attention.out_proj",
),
MODEL_TENSOR.V_RESMPL_KV: (
models/openai-moe-iswa.cpp
models/openelm.cpp
models/orion.cpp
+ models/paddleocr.cpp
models/pangu-embedded.cpp
models/phi2.cpp
models/phi3.cpp
{ LLM_ARCH_RND1, "rnd1" },
{ LLM_ARCH_PANGU_EMBED, "pangu-embedded" },
{ LLM_ARCH_MISTRAL3, "mistral3" },
+ { LLM_ARCH_PADDLEOCR, "paddleocr" },
{ LLM_ARCH_MIMO2, "mimo2" },
{ LLM_ARCH_STEP35, "step35" },
{ LLM_ARCH_LLAMA_EMBED, "llama-embed" },
case LLM_ARCH_INTERNLM2:
case LLM_ARCH_GRANITE:
case LLM_ARCH_ERNIE4_5:
+ case LLM_ARCH_PADDLEOCR:
case LLM_ARCH_SMOLLM3:
case LLM_ARCH_DREAM:
case LLM_ARCH_LLADA:
LLM_ARCH_RND1,
LLM_ARCH_PANGU_EMBED,
LLM_ARCH_MISTRAL3,
+ LLM_ARCH_PADDLEOCR,
LLM_ARCH_MIMO2,
LLM_ARCH_STEP35,
LLM_ARCH_LLAMA_EMBED,
} break;
case LLM_ARCH_ERNIE4_5:
case LLM_ARCH_ERNIE4_5_MOE:
+ case LLM_ARCH_PADDLEOCR:
{
+ // PaddleOCR needs the M-RoPE dimension sections (the key is optional, hence the trailing false)
+ ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, false);
+
ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
if (arch == LLM_ARCH_ERNIE4_5_MOE) {
ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
} break;
case LLM_ARCH_ERNIE4_5:
case LLM_ARCH_ERNIE4_5_MOE:
+ case LLM_ARCH_PADDLEOCR:
{
tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
{
llm = std::make_unique<llm_build_ernie4_5_moe>(*this, params);
} break;
+ case LLM_ARCH_PADDLEOCR:
+ {
+ llm = std::make_unique<llm_build_paddleocr>(*this, params);
+ } break;
case LLM_ARCH_HUNYUAN_MOE:
{
llm = std::make_unique<llm_build_hunyuan_moe>(*this, params);
return LLAMA_ROPE_TYPE_NEOX;
case LLM_ARCH_QWEN2VL:
+ case LLM_ARCH_PADDLEOCR:
return LLAMA_ROPE_TYPE_MROPE;
case LLM_ARCH_QWEN3VL:
case LLM_ARCH_QWEN3VLMOE:
|| t.first == "<|calls|>" // solar-open
|| t.first == "<end_of_turn>"
|| t.first == "<|endoftext|>"
+ || t.first == "</s>" // paddleocr
|| t.first == "<|eom_id|>"
|| t.first == "<EOT>"
|| t.first == "_<EOT>"
llm_build_ernie4_5_moe(const llama_model & model, const llm_graph_params & params);
};
+struct llm_build_paddleocr : public llm_graph_context {
+ llm_build_paddleocr(const llama_model & model, const llm_graph_params & params);
+};
+
template <bool iswa>
struct llm_build_exaone4 : public llm_graph_context {
llm_build_exaone4(const llama_model & model, const llm_graph_params & params);
--- /dev/null
+#include "models.h"
+
+llm_build_paddleocr::llm_build_paddleocr(const llama_model & model, const llm_graph_params & params) :
+ llm_graph_context(params) {
+
+ // NOTE: same as qwen2vl.cpp, but the bias tensors are optional
+
+ const int64_t n_embd_head = hparams.n_embd_head_v;
+
+ GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+ GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+ ggml_tensor * cur;
+ ggml_tensor * inpL;
+
+ inpL = build_inp_embd(model.tok_embd);
+
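+ // the 4 M-RoPE sections split n_rot across the position components (t/h/w/e), as in qwen2vl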
+ int sections[4];
+ std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections);
+
+ // inp_pos - contains the positions
+ ggml_tensor * inp_pos = build_inp_pos();
+
+ auto * inp_attn = build_attn_inp_kv();
+
+ ggml_tensor * inp_out_ids = build_inp_out_ids();
+
+ for (int il = 0; il < n_layer; ++il) {
+ ggml_tensor * inpSA = inpL;
+
+ // norm
+ {
+ cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
+ cb(cur, "attn_norm", il);
+ }
+ // self-attention
+ {
+ ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+ cb(Qcur, "Qcur", il);
+ if (model.layers[il].bq) {
+ Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+ cb(Qcur, "Qcur", il);
+ }
+ ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+ cb(Kcur, "Kcur", il);
+ if (model.layers[il].bk) {
+ Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+ cb(Kcur, "Kcur", il);
+ }
+ ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+ cb(Vcur, "Vcur", il);
+ if (model.layers[il].bv) {
+ Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+ cb(Vcur, "Vcur", il);
+ }
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+ Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+ Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+
+ Qcur = ggml_rope_multi(
+ ctx0, Qcur, inp_pos, nullptr,
+ n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+
+ Kcur = ggml_rope_multi(
+ ctx0, Kcur, inp_pos, nullptr,
+ n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
+
+ cur = build_attn(inp_attn,
+ model.layers[il].wo, model.layers[il].bo,
+ Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
+ }
+ if (il == n_layer - 1) {
+ // skip computing output for unused tokens
+ cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+ inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+ }
+ ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+ cb(ffn_inp, "ffn_inp", il);
+
+ // feed-forward network
+ {
+ cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
+ cb(cur, "ffn_norm", il);
+
+ cur = build_ffn(cur,
+ model.layers[il].ffn_up, NULL, NULL,
+ model.layers[il].ffn_gate, NULL, NULL,
+ model.layers[il].ffn_down, NULL, NULL,
+ NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
+ cb(cur, "ffn_out", il);
+ }
+ cur = ggml_add(ctx0, cur, ffn_inp);
+
+ cur = build_cvec(cur, il);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
+ }
+ cur = inpL;
+
+ cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);
+
+ cb(cur, "result_norm", -1);
+ res->t_embd = cur;
+
+ // lm_head
+ cur = build_lora_mm(model.output, cur);
+
+ cb(cur, "result_output", -1);
+ res->t_logits = cur;
+
+ ggml_build_forward_expand(gf, cur);
+}
models/llama4.cpp
models/llava.cpp
models/minicpmv.cpp
+ models/paddleocr.cpp
models/pixtral.cpp
models/qwen2vl.cpp
models/qwen3vl.cpp
PROJECTOR_TYPE_MUSIC_FLAMINGO,
PROJECTOR_TYPE_LFM2,
PROJECTOR_TYPE_KIMIVL,
+ PROJECTOR_TYPE_PADDLEOCR,
PROJECTOR_TYPE_LIGHTONOCR,
PROJECTOR_TYPE_COGVLM,
PROJECTOR_TYPE_JANUS_PRO,
{ PROJECTOR_TYPE_MUSIC_FLAMINGO, "musicflamingo"},
{ PROJECTOR_TYPE_LFM2, "lfm2"},
{ PROJECTOR_TYPE_KIMIVL, "kimivl"},
+ { PROJECTOR_TYPE_PADDLEOCR, "paddleocr"},
{ PROJECTOR_TYPE_LIGHTONOCR,"lightonocr"},
{ PROJECTOR_TYPE_COGVLM, "cogvlm"},
{ PROJECTOR_TYPE_JANUS_PRO, "janus_pro"},
{
builder = std::make_unique<clip_graph_kimivl>(ctx, img);
} break;
+ case PROJECTOR_TYPE_PADDLEOCR:
+ {
+ builder = std::make_unique<clip_graph_paddleocr>(ctx, img);
+ } break;
case PROJECTOR_TYPE_KIMIK25:
{
builder = std::make_unique<clip_graph_kimik25>(ctx, img);
hparams.audio_window_len = 400;
hparams.audio_hop_len = 160;
} break;
+ case PROJECTOR_TYPE_PADDLEOCR:
+ {
+ hparams.n_merge = 2;
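+ // merge 2x2 patch groups into one token before the projector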
+ get_u32(KEY_IMAGE_MIN_PIXELS, hparams.image_min_pixels);
+ get_u32(KEY_IMAGE_MAX_PIXELS, hparams.image_max_pixels);
+
+ hparams.set_warmup_n_tokens(28*28); // avoid OOM on warmup
+ } break;
case PROJECTOR_TYPE_LFM2A:
{
// audio preprocessing params
model.mm_2_b = get_tensor(string_format(TN_LLAVA_PROJ, 2, "bias"));
} break;
case PROJECTOR_TYPE_KIMIVL:
+ case PROJECTOR_TYPE_PADDLEOCR:
case PROJECTOR_TYPE_KIMIK25:
{
model.mm_input_norm_w = get_tensor(TN_MM_INP_NORM);
case PROJECTOR_TYPE_QWEN25VL:
case PROJECTOR_TYPE_QWEN3VL:
case PROJECTOR_TYPE_GLM4V:
+ case PROJECTOR_TYPE_PADDLEOCR:
{
GGML_ASSERT(params.image_min_pixels > 0 && params.image_max_pixels > 0);
clip_image_u8 resized;
case PROJECTOR_TYPE_QWEN25VL:
case PROJECTOR_TYPE_QWEN3VL:
case PROJECTOR_TYPE_GLM4V:
+ case PROJECTOR_TYPE_PADDLEOCR:
case PROJECTOR_TYPE_YOUTUVL:
return (img->nx / params.patch_size) / 2;
default:
case PROJECTOR_TYPE_QWEN25VL:
case PROJECTOR_TYPE_QWEN3VL:
case PROJECTOR_TYPE_GLM4V:
+ case PROJECTOR_TYPE_PADDLEOCR:
case PROJECTOR_TYPE_YOUTUVL:
return (img->ny / params.patch_size) / 2;
default:
int y_patch = CLIP_ALIGN(img->ny, out_patch_size) / out_patch_size;
n_patches = x_patch * y_patch;
} break;
+ case PROJECTOR_TYPE_PADDLEOCR:
+ {
+ // dynamic size: each n_merge x n_merge block of patches becomes one output token
+ int n_merge = ctx->model.hparams.n_merge;
+ int stride = n_merge * n_merge;
+ n_patches = CLIP_ALIGN(n_patches, stride) / stride;
+ } break;
case PROJECTOR_TYPE_PIXTRAL:
case PROJECTOR_TYPE_LIGHTONOCR:
{
}
}
+ set_input_i32("positions", positions);
+ } break;
+ case PROJECTOR_TYPE_PADDLEOCR:
+ {
+ const int merge_ratio = hparams.n_merge;
+ const int pw = image_size_width / patch_size;
+ const int ph = image_size_height / patch_size;
+ std::vector<int> positions(n_pos * 4);
+ int ptr = 0;
+ // NOTE: same scheme as qwen2vl, but with the x and y components swapped
+ for (int y = 0; y < ph; y += merge_ratio) {
+ for (int dy = 0; dy < 2; dy++) {
+ for (int x = 0; x < pw; x += merge_ratio) {
+ for (int dx = 0; dx < 2; dx++) {
+ positions[ ptr] = y + dy;
+ positions[ num_patches + ptr] = x + dx;
+ positions[2 * num_patches + ptr] = y + dy;
+ positions[3 * num_patches + ptr] = x + dx;
+ ptr++;
+ }
+ }
+ }
+ }
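+ // positions holds 4 planes of num_patches entries, ordered (y, x, y, x) for each patch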
+
set_input_i32("positions", positions);
} break;
case PROJECTOR_TYPE_QWEN25VL:
return ctx->model.mm_2_w->ne[1];
case PROJECTOR_TYPE_LFM2:
case PROJECTOR_TYPE_KIMIVL:
+ case PROJECTOR_TYPE_PADDLEOCR:
case PROJECTOR_TYPE_KIMIK25:
return ctx->model.mm_2_w->ne[1];
case PROJECTOR_TYPE_COGVLM:
ggml_cgraph * build() override;
};
+struct clip_graph_paddleocr : clip_graph {
+ clip_graph_paddleocr(clip_ctx * ctx, const clip_image_f32 & img) : clip_graph(ctx, img) {}
+ ggml_cgraph * build() override;
+};
+
struct clip_graph_cogvlm : clip_graph {
clip_graph_cogvlm(clip_ctx * ctx, const clip_image_f32 & img) : clip_graph(ctx, img) {}
ggml_cgraph * build() override;
--- /dev/null
+#include "models.h"
+
+ggml_cgraph * clip_graph_paddleocr::build() {
+ const int n_pos = n_patches;
+ const int num_position_ids = n_pos * 4; // M-RoPE uses 4 position components per token
+
+ int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4}; // split the rotated dims evenly across the 4 sections
+
+ ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
+ ggml_set_name(positions, "positions");
+ ggml_set_input(positions);
+
+ auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
+ return ggml_rope_multi(
+ ctx0, cur, positions, nullptr,
+ d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION,
+ 32768, 10000, 1, 0, 1, 32, 1); // n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
+ };
+
+ ggml_tensor * learned_pos_embd = resize_position_embeddings();
+ ggml_tensor * inp = build_inp();
+ ggml_tensor * cur = build_vit(
+ inp, n_patches,
+ NORM_TYPE_NORMAL,
+ hparams.ffn_op,
+ learned_pos_embd,
+ add_pos);
+
+ cb(cur, "vit_out", -1);
+
+ {
+ // PaddleOCR-VL mlp_AR projector
+ const float proj_norm_eps = 1e-5f;
+ cur = build_norm(cur,
+ model.mm_input_norm_w, model.mm_input_norm_b,
+ NORM_TYPE_NORMAL, proj_norm_eps, -1);
+
+ const int scale_factor = model.hparams.n_merge;
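+ // fold each n_merge x n_merge patch group into the channel dimension (pixel-shuffle style)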
+ cur = build_patch_merge_permute(cur, scale_factor);
+ cur = build_ffn(cur,
+ model.mm_1_w, model.mm_1_b,
+ nullptr, nullptr,
+ model.mm_2_w, model.mm_2_b,
+ hparams.ffn_op, -1);
+ cb(cur, "mlp_out", -1);
+ }
+
+ // build the graph
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
+}
img_beg = "<|begin_of_image|>";
img_end = "<|end_of_image|>";
+ } else if (proj == PROJECTOR_TYPE_PADDLEOCR) {
+ // <|IMAGE_START|> ... (image embeddings) ... <|IMAGE_END|>
+ img_beg = "<|IMAGE_START|>";
+ img_end = "<|IMAGE_END|>";
}
}
case PROJECTOR_TYPE_QWEN25VL:
case PROJECTOR_TYPE_QWEN3VL:
case PROJECTOR_TYPE_GLM4V:
+ case PROJECTOR_TYPE_PADDLEOCR:
return true;
default:
return false;