- [x] [Bitnet b1.58 models](https://huggingface.co/1bitLLM)
- [x] [Flan T5](https://huggingface.co/models?search=flan-t5)
- [x] [Open Elm models](https://huggingface.co/collections/apple/openelm-instruct-models-6619ad295d7ae9f868b759ca)
-- [x] [ChatGLM3-6b](https://huggingface.co/THUDM/chatglm3-6b) + [ChatGLM4-9b](https://huggingface.co/THUDM/glm-4-9b)
+- [x] [ChatGLM3-6b](https://huggingface.co/THUDM/chatglm3-6b) + [ChatGLM4-9b](https://huggingface.co/THUDM/glm-4-9b) + [GLMEdge-1.5b](https://huggingface.co/THUDM/glm-edge-1.5b-chat) + [GLMEdge-4b](https://huggingface.co/THUDM/glm-edge-4b-chat)
- [x] [SmolLM](https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966)
- [x] [EXAONE-3.0-7.8B-Instruct](https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct)
- [x] [FalconMamba Models](https://huggingface.co/collections/tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a)
- [x] [Mini CPM](https://huggingface.co/models?search=MiniCPM)
- [x] [Moondream](https://huggingface.co/vikhyatk/moondream2)
- [x] [Bunny](https://github.com/BAAI-DCAI/Bunny)
+- [x] [GLM-EDGE](https://huggingface.co/models?search=glm-edge)
- [x] [Qwen2-VL](https://huggingface.co/collections/Qwen/qwen2-vl-66cee7455501d7126940800d)
</details>
if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
# ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
res = "jina-v2-code"
- if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
+ if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b" or chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516":
# ref: https://huggingface.co/THUDM/glm-4-9b-chat
res = "chatglm-bpe"
if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)
-@Model.register("ChatGLMModel", "ChatGLMForConditionalGeneration")
+@Model.register("GlmForCausalLM", "ChatGLMModel", "ChatGLMForConditionalGeneration")
class ChatGLMModel(Model):
model_arch = gguf.MODEL_ARCH.CHATGLM
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
- vocab_size = hparams["padded_vocab_size"]
+ vocab_size = hparams.get("padded_vocab_size", hparams["vocab_size"])
assert max(tokenizer.get_vocab().values()) < vocab_size
- tokpre = self.get_vocab_base_pre(tokenizer)
-
- merges = []
- vocab = {}
- mergeable_ranks = tokenizer.mergeable_ranks
- for token, rank in mergeable_ranks.items():
- vocab[ChatGLMModel.token_bytes_to_string(token)] = rank
- if len(token) == 1:
- continue
- merged = ChatGLMModel.bpe(mergeable_ranks, token, max_rank=rank)
- assert len(merged) >= 2 and len(merged) <= 7
- merges.append(' '.join(map(ChatGLMModel.token_bytes_to_string, merged)))
-
- # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
- added_vocab = tokenizer.get_added_vocab()
- reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}
-
- for i in range(vocab_size):
- if i not in reverse_vocab:
- tokens.append(f"[PAD{i}]")
- toktypes.append(gguf.TokenType.UNUSED)
- elif reverse_vocab[i] in added_vocab:
- tokens.append(reverse_vocab[i])
- if tokenizer.added_tokens_decoder[i].special:
- toktypes.append(gguf.TokenType.CONTROL)
- else:
- toktypes.append(gguf.TokenType.USER_DEFINED)
- else:
- tokens.append(reverse_vocab[i])
- toktypes.append(gguf.TokenType.NORMAL)
-
+ tokens, toktypes, tokpre = self.get_vocab_base()
self.gguf_writer.add_tokenizer_model("gpt2")
self.gguf_writer.add_tokenizer_pre(tokpre)
self.gguf_writer.add_token_list(tokens)
self.gguf_writer.add_token_types(toktypes)
-
- special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
- special_vocab.merges = merges
+ special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True)
# only add special tokens when they were not already loaded from config.json
special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
def set_gguf_parameters(self):
n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
- n_head_kv = self.hparams.get("multi_query_group_num", n_head)
+ n_head_kv = self.hparams.get("multi_query_group_num", self.hparams.get("num_key_value_heads", n_head))
self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
self.gguf_writer.add_embedding_length(n_embed)
- self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", 4 * n_embed))
- self.gguf_writer.add_block_count(self.hparams["num_layers"])
+ self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", self.hparams.get("intermediate_size", 4 * n_embed)))
+ self.gguf_writer.add_block_count(self.hparams.get("num_layers", self.hparams["num_hidden_layers"]))
self.gguf_writer.add_head_count(n_head)
self.gguf_writer.add_head_count_kv(n_head_kv)
- self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layernorm_epsilon"])
+ self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("layernorm_epsilon", 1e-5))
self.gguf_writer.add_file_type(self.ftype)
- self.gguf_writer.add_rope_dimension_count(64)
+ if "attention_dim" in self.hparams:
+ rope_dim = self.hparams["attention_dim"]
+ else:
+ rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"]
+ self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5)))
self.gguf_writer.add_add_bos_token(False)
rope_freq = 10000
if "rope_ratio" in self.hparams:
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
del bid # unused
- if name.endswith(".rotary_pos_emb.inv_freq"):
+ if name.endswith(".rotary_pos_emb.inv_freq") or name.startswith("model.vision."):
return []
name = name.removeprefix("transformer.")
--- /dev/null
+# GLMV-EDGE
+
+Currently this implementation supports [glm-edge-v-2b](https://huggingface.co/THUDM/glm-edge-v-2b) and [glm-edge-v-5b](https://huggingface.co/THUDM/glm-edge-v-5b).
+
+## Usage
+Build the project with CMake, or run `make llama-llava-cli` to build just this example.
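+
+For example, a typical CMake build might look like the following sketch (the target name is taken from the `make` command above):
+
+```sh
+cmake -B build
+cmake --build build --config Release --target llama-llava-cli
+# the binary will typically end up at build/bin/llama-llava-cli
+```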
+
+After building, run `./llama-llava-cli` without arguments to see the usage. For example:
+
+```sh
+./llama-llava-cli -m model_path/ggml-model-f16.gguf --mmproj model_path/mmproj-model-f16.gguf --image img_path/image.jpg -p "<|system|>\n system prompt <image><|user|>\n prompt <|assistant|>\n"
+```
+
+**note**: A lower temperature such as 0.1 is recommended for better quality; add `--temp 0.1` to the command to set it.
+**note**: For GPU offloading, use the `-ngl` flag as usual.
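+
+For instance, a full invocation combining these options might look like the following sketch (model and image paths, and the `-ngl` value, are placeholders):
+
+```sh
+./llama-llava-cli -m model_path/ggml-model-f16.gguf \
+    --mmproj model_path/mmproj-model-f16.gguf \
+    --image img_path/image.jpg \
+    --temp 0.1 -ngl 99 \
+    -p "<|system|>\n system prompt <image><|user|>\n prompt <|assistant|>\n"
+```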
+
+## GGUF conversion
+
+1. Clone a GLMV-EDGE model ([2B](https://huggingface.co/THUDM/glm-edge-v-2b) or [5B](https://huggingface.co/THUDM/glm-edge-v-5b)). For example:
+
+```sh
+# clone either the 5B or the 2B model
+git clone https://huggingface.co/THUDM/glm-edge-v-5b
+# or
+git clone https://huggingface.co/THUDM/glm-edge-v-2b
+```
+
+2. Use `glmedge-surgery.py` to split the GLMV-EDGE model into its LLM and multimodal projector parts:
+
+```sh
+python ./examples/llava/glmedge-surgery.py -m ../model_path
+```
+
+3. Use `glmedge-convert-image-encoder-to-gguf.py` to convert the GLMV-EDGE image encoder to GGUF:
+
+```sh
+python ./examples/llava/glmedge-convert-image-encoder-to-gguf.py -m ../model_path --llava-projector ../model_path/glm.projector --output-dir ../model_path
+```
+
+4. Use `examples/convert_hf_to_gguf.py` to convert the LLM part of GLMV-EDGE to GGUF:
+
+```sh
+python convert_hf_to_gguf.py ../model_path
+```
+
+Now both the LLM part and the image encoder are in the `model_path` directory.
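+
+Optionally, the converted LLM can be quantized with the standard llama.cpp `llama-quantize` tool to reduce memory usage; the file names and quantization type below are only examples:
+
+```sh
+./llama-quantize ../model_path/ggml-model-f16.gguf ../model_path/ggml-model-Q4_K_M.gguf Q4_K_M
+```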
#define KEY_HAS_VIS_ENC "clip.has_vision_encoder"
#define KEY_HAS_LLAVA_PROJ "clip.has_llava_projector"
#define KEY_HAS_MINICPMV_PROJ "clip.has_minicpmv_projector"
+#define KEY_HAS_GLM_PROJ "clip.has_glm_projector"
#define KEY_MINICPMV_VERSION "clip.minicpmv_version"
#define KEY_HAS_QWEN2VL_MERGER "clip.has_qwen2vl_merger"
#define KEY_USE_GELU "clip.use_gelu"
#define TN_MINICPMV_ATTN "resampler.attn.%s.%s"
#define TN_MINICPMV_LN "resampler.ln_%s.%s"
+#define TN_GLM_ADAPER_CONV "adapter.conv.%s"
+#define TN_GLM_ADAPTER_LINEAR "adapter.linear.linear.%s"
+#define TN_GLM_ADAPTER_NORM_1 "adapter.linear.norm1.%s"
+#define TN_GLM_ADAPTER_D_H_2_4H "adapter.linear.dense_h_to_4h.%s"
+#define TN_GLM_ADAPTER_GATE "adapter.linear.gate.%s"
+#define TN_GLM_ADAPTER_D_4H_2_H "adapter.linear.dense_4h_to_h.%s"
+#define TN_GLM_BOI_W "adapter.boi"
+#define TN_GLM_EOI_W "adapter.eoi"
+
enum projector_type {
PROJECTOR_TYPE_MLP,
PROJECTOR_TYPE_LDP,
PROJECTOR_TYPE_LDPV2,
PROJECTOR_TYPE_RESAMPLER,
+ PROJECTOR_TYPE_GLM_EDGE,
PROJECTOR_TYPE_MERGER,
PROJECTOR_TYPE_UNKNOWN,
};
{ PROJECTOR_TYPE_LDP, "ldp" },
{ PROJECTOR_TYPE_LDPV2, "ldpv2"},
{ PROJECTOR_TYPE_RESAMPLER, "resampler"},
+ { PROJECTOR_TYPE_GLM_EDGE, "adapter"},
{ PROJECTOR_TYPE_MERGER, "qwen2vl_merger"},
};
struct ggml_tensor * mm_4_w = NULL;
struct ggml_tensor * mm_4_b = NULL;
+ // GLMV-Edge projection
+ struct ggml_tensor * mm_model_adapter_conv_w;
+ struct ggml_tensor * mm_model_adapter_conv_b;
+ struct ggml_tensor * boi_w;
+ struct ggml_tensor * eoi_w;
+
// MobileVLM projection
struct ggml_tensor * mm_model_mlp_1_w;
struct ggml_tensor * mm_model_mlp_1_b;
bool has_vision_encoder = false;
bool has_llava_projector = false;
bool has_minicpmv_projector = false;
+ bool has_glm_projector = false;
bool has_qwen2vl_merger = false;
int minicpmv_version = 2;
const int batch_size = imgs->size;
- if (ctx->has_llava_projector || ctx->has_minicpmv_projector) {
+ if (ctx->has_llava_projector || ctx->has_minicpmv_projector || ctx->has_glm_projector) {
GGML_ASSERT(batch_size == 1);
}
}
// loop over layers
- if (ctx->has_minicpmv_projector || ctx->has_qwen2vl_merger) {
- // TODO: figure out why we doing thing in this way ???
+ if (ctx->has_minicpmv_projector || ctx->has_glm_projector || ctx->has_qwen2vl_merger) {
n_layer += 1;
}
for (int il = 0; il < n_layer - 1; il++) {
GGML_ASSERT(false);
}
}
- else if (ctx->proj_type == PROJECTOR_TYPE_MERGER) {
+ // glm projector
+ else if (ctx->has_glm_projector) {
+ if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
+ size_t gridsz = (size_t)sqrt(embeddings->ne[1]);
+ embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
+ embeddings = ggml_reshape_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]);
+ embeddings = ggml_conv_2d(ctx0, model.mm_model_adapter_conv_w, embeddings, 2, 2, 0, 0, 1, 1);
+ embeddings = ggml_reshape_3d(ctx0, embeddings, embeddings->ne[0]*embeddings->ne[1], embeddings->ne[2], batch_size);
+ embeddings = ggml_cont(ctx0, ggml_permute(ctx0, embeddings, 1, 0, 2, 3));
+ embeddings = ggml_add(ctx0, embeddings, model.mm_model_adapter_conv_b);
+ //GLU
+ {
+ embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings);
+ embeddings = ggml_norm(ctx0, embeddings, eps);
+ embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_q_w), model.mm_model_ln_q_b);
+ embeddings = ggml_gelu_inplace(ctx0, embeddings);
+ struct ggml_tensor * x = embeddings;
+ embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, embeddings);
+ x = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w, x);
+ embeddings = ggml_silu_inplace(ctx0, embeddings);
+ embeddings = ggml_mul(ctx0, embeddings, x);
+ embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings);
+ }
+ } else {
+ GGML_ABORT("fatel error");
+ }
+ } else if (ctx->proj_type == PROJECTOR_TYPE_MERGER) {
embeddings = ggml_reshape_3d(ctx0, embeddings, hidden_size * 4, num_positions / 4, batch_size);
embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
new_clip->minicpmv_version = gguf_get_val_i32(ctx, idx);
}
+ idx = gguf_find_key(ctx, KEY_HAS_GLM_PROJ);
+ if (idx != -1) {
+ new_clip->has_glm_projector = gguf_get_val_bool(ctx, idx);
+ }
+
idx = gguf_find_key(ctx, KEY_HAS_QWEN2VL_MERGER);
if (idx != -1) {
new_clip->has_qwen2vl_merger = gguf_get_val_bool(ctx, idx);
LOG_INF("%s: vision_encoder: %d\n", __func__, new_clip->has_vision_encoder);
LOG_INF("%s: llava_projector: %d\n", __func__, new_clip->has_llava_projector);
LOG_INF("%s: minicpmv_projector: %d\n", __func__, new_clip->has_minicpmv_projector);
+ LOG_INF("%s: glm_projector: %d\n", __func__, new_clip->has_glm_projector);
LOG_INF("%s: model size: %.2f MB\n", __func__, model_size / 1024.0 / 1024.0);
LOG_INF("%s: metadata size: %.2f MB\n", __func__, ggml_get_mem_size(meta) / 1024.0 / 1024.0);
}
vision_model.mm_model_ln_post_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "weight"));
vision_model.mm_model_ln_post_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "bias"));
}
+ else if (new_clip->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
+ vision_model.mm_model_adapter_conv_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPER_CONV, "weight"));
+ vision_model.mm_model_adapter_conv_b = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPER_CONV, "bias"));
+ vision_model.mm_model_mlp_0_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_LINEAR,"weight"));
+ vision_model.mm_model_ln_q_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_NORM_1,"weight"));
+ vision_model.mm_model_ln_q_b = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_NORM_1,"bias"));
+ vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_D_H_2_4H,"weight"));
+ vision_model.mm_model_mlp_2_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_GATE,"weight"));
+ vision_model.mm_model_mlp_3_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_D_4H_2_H,"weight"));
+ vision_model.boi_w = get_tensor(new_clip->ctx_data, TN_GLM_BOI_W);
+ vision_model.eoi_w = get_tensor(new_clip->ctx_data, TN_GLM_EOI_W);
+ }
else if (new_clip->proj_type == PROJECTOR_TYPE_MERGER) {
vision_model.mm_0_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "weight"));
vision_model.mm_0_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "bias"));
return true;
}
+ if (ctx->has_glm_projector) {
+ res_imgs->size = 1;
+ res_imgs->data = new clip_image_f32[res_imgs->size];
+ clip_image_u8 resized_image;
+ int32_t sz = ctx->vision_model.hparams.image_size;
+ bicubic_resize(*img, resized_image, sz, sz);
+ clip_image_f32 * res = clip_image_f32_init();
+ //clip_image_save_to_bmp(resized_image, "resized.bmp");
+ normalize_image_u8_to_f32(&resized_image, res, ctx->image_mean, ctx->image_std);
+ res_imgs->data[0] = *res;
+ clip_image_f32_free(res);
+ return true;
+ }
+
bool pad_to_square = true;
if (!ctx->has_vision_encoder) {
LOG_ERR("This gguf file seems to have no vision encoder\n");
}
size_t clip_embd_nbytes(const struct clip_ctx * ctx) {
- return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float);
+ int extra_tokens = ctx->has_glm_projector ? 2 : 0;
+ return (clip_n_patches(ctx) + extra_tokens) * clip_n_mmproj_embd(ctx) * sizeof(float);
}
size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_h, int img_w) {
int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size);
- if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2) {
+ if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2 || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
n_patches /= 4;
} else if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) {
if (ctx->minicpmv_version == 2) {
if (ctx->has_minicpmv_projector) {
GGML_ASSERT(batch_size == 1);
}
+ if (ctx->has_glm_projector) {
+ GGML_ASSERT(batch_size == 1);
+ ggml_tensor * boi = ctx->vision_model.boi_w;
+ ggml_backend_tensor_get(boi,vec,0,ggml_nbytes(boi));
+ vec = (float*)(vec+ggml_nelements(boi)); //offset for boi
+ }
// build the inference graph
ggml_cgraph * gf = clip_image_build_graph(ctx, imgs, ctx->load_image_size, true);
ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions));
free(positions_data);
- {
+ if (!ctx->has_glm_projector) {
struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches");
int* patches_data = (int*)malloc(ggml_nbytes(patches));
for (int i = 0; i < num_patches; i++) {
// copy the embeddings to the location passed by the user
ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings));
+ if (ctx->has_glm_projector) {
+ //eoi
+ ggml_tensor * eoi = ctx->vision_model.eoi_w;
+ int offset = ggml_nelements(embeddings);
+ ggml_backend_tensor_get(eoi, vec+offset, 0, ggml_nbytes(eoi));
+ }
+
return true;
}
return 3584;
}
}
+ if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) {
+ return ctx->vision_model.mm_model_mlp_3_w->ne[1];
+ }
if (ctx->proj_type == PROJECTOR_TYPE_MERGER) {
return ctx->vision_model.mm_1_b->ne[0];
}
return 0;
}
+bool clip_is_glm(const struct clip_ctx * ctx) {
+ return ctx->has_glm_projector;
+}
bool clip_is_qwen2vl(const struct clip_ctx * ctx) {
return ctx->has_qwen2vl_merger;
}
CLIP_API bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec);
+CLIP_API bool clip_is_glm(const struct clip_ctx * ctx);
+
#ifdef __cplusplus
}
#endif
--- /dev/null
+import argparse
+import os
+import json
+import re
+
+import torch
+import numpy as np
+from gguf import *
+
+TEXT = "clip.text"
+VISION = "clip.vision"
+from transformers import SiglipVisionModel, SiglipVisionConfig
+
+def k(raw_key: str, arch: str) -> str:
+ return raw_key.format(arch=arch)
+
+
+def should_skip_tensor(name: str, has_text: bool, has_vision: bool, has_llava: bool) -> bool:
+ if name in (
+ "logit_scale",
+ "text_model.embeddings.position_ids",
+ "vision_model.embeddings.position_ids",
+ ):
+ return True
+
+ if name in (
+ "vision_model.head.probe",
+ "vision_model.head.attention.in_proj_weight",
+ "vision_model.head.attention.in_proj_bias",
+ "vision_model.head.attention.out_proj.weight",
+ "vision_model.head.attention.out_proj.bias",
+ "vision_model.head.layernorm.weight",
+ "vision_model.head.layernorm.bias",
+ "vision_model.head.mlp.fc1.weight",
+ "vision_model.head.mlp.fc1.bias",
+ "vision_model.head.mlp.fc2.weight",
+ "vision_model.head.mlp.fc2.bias"
+ ):
+ return True
+
+ if name.startswith("v") and not has_vision:
+ return True
+
+ if name.startswith("t") and not has_text:
+ return True
+
+ return False
+
+
+def get_tensor_name(name: str) -> str:
+ if "projection" in name:
+ return name
+ if "mm_projector" in name:
+ name = name.replace("model.mm_projector", "mm")
+ name = re.sub(r'mm\.mlp\.mlp', 'mm.model.mlp', name, count=1)
+ name = re.sub(r'mm\.peg\.peg', 'mm.model.peg', name, count=1)
+ return name
+
+ return name.replace("text_model", "t").replace("vision_model", "v").replace("encoder.layers", "blk").replace("embeddings.", "").replace("_proj", "").replace("self_attn.", "attn_").replace("layer_norm", "ln").replace("layernorm", "ln").replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("embedding", "embd").replace("final", "post").replace("layrnorm", "ln")
+
+
+def bytes_to_unicode():
+ """
+ Returns list of utf-8 byte and a corresponding list of unicode strings.
+ The reversible bpe codes work on unicode strings.
+ This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+ When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+ This is a significant percentage of your normal, say, 32K bpe vocab.
+ To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+ And avoids mapping to whitespace/control characters the bpe code barfs on.
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1))
+ + list(range(ord("¡"), ord("¬") + 1))
+ + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
+
+
+ap = argparse.ArgumentParser()
+ap.add_argument("-m", "--model-dir", help="Path to model directory cloned from HF Hub", required=True)
+ap.add_argument("--use-f32", action="store_true", default=False, help="Use f32 instead of f16")
+ap.add_argument("--text-only", action="store_true", required=False,
+ help="Save a text-only model. It can't be used to encode images")
+ap.add_argument("--vision-only", action="store_true", required=False,
+ help="Save a vision-only model. It can't be used to encode texts")
+ap.add_argument("--clip-model-is-vision", action="store_true", required=False,
+ help="The clip model is a pure vision model (ShareGPT4V vision extract for example)")
+ap.add_argument("--clip-model-is-openclip", action="store_true", required=False,
+ help="The clip model is from openclip (for ViT-SO400M type))")
+ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.")
+ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp, ldpv2", choices=["mlp", "ldp", "ldpv2","adapter"], default="adapter")
+ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None)
+# Example --image_mean 0.48145466 0.4578275 0.40821073 --image_std 0.26862954 0.26130258 0.27577711
+# Example --image_mean 0.5 0.5 0.5 --image_std 0.5 0.5 0.5
+default_image_mean = [0.5, 0.5, 0.5]
+default_image_std = [0.5, 0.5, 0.5]
+ap.add_argument('--image-mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor) ', default=None)
+ap.add_argument('--image-std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None)
+
+args = ap.parse_args()
+
+
+if args.text_only and args.vision_only:
+ print("--text-only and --image-only arguments cannot be specified at the same time.")
+ exit(1)
+
+if args.use_f32:
+ print("WARNING: Weights for the convolution op is always saved in f16, as the convolution op in GGML does not support 32-bit kernel weights yet.")
+
+# output in the same directory as the model if output_dir is None
+dir_model = args.model_dir
+
+if args.clip_model_is_vision or not os.path.exists(dir_model + "/vocab.json") or args.clip_model_is_openclip:
+ vocab = None
+ tokens = None
+else:
+ with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f:
+ vocab = json.load(f)
+ tokens = [key for key in vocab]
+
+with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
+ config = json.load(f)
+ if args.clip_model_is_vision:
+ v_hparams = config
+ t_hparams = None
+ else:
+ v_hparams = config["vision_config"]
+ t_hparams = None
+
+# possible data types
+# ftype == 0 -> float32
+# ftype == 1 -> float16
+#
+# map from ftype to string
+ftype_str = ["f32", "f16"]
+
+ftype = 1
+if args.use_f32:
+ ftype = 0
+
+vision_config = SiglipVisionConfig(**v_hparams)
+model = SiglipVisionModel(vision_config)
+model.load_state_dict(torch.load(os.path.join(dir_model, "glm.clip")))
+
+fname_middle = None
+has_text_encoder = False
+has_vision_encoder = True
+has_glm_projector = True
+if args.text_only:
+ fname_middle = "text-"
+ has_vision_encoder = False
+elif args.llava_projector is not None:
+ fname_middle = "mmproj-"
+ has_text_encoder = False
+ has_glm_projector = True
+elif args.vision_only:
+ fname_middle = "vision-"
+ has_text_encoder = False
+else:
+ fname_middle = ""
+
+output_dir = args.output_dir if args.output_dir is not None else dir_model
+os.makedirs(output_dir, exist_ok=True)
+output_prefix = os.path.basename(output_dir).replace("ggml_", "")
+fname_out = os.path.join(output_dir, f"{fname_middle}model-{ftype_str[ftype]}.gguf")
+fout = GGUFWriter(path=fname_out, arch="clip")
+
+fout.add_bool("clip.has_text_encoder", has_text_encoder)
+fout.add_bool("clip.has_vision_encoder", has_vision_encoder)
+fout.add_bool("clip.has_glm_projector", has_glm_projector)
+fout.add_file_type(ftype)
+model_name = config["_name_or_path"] if "_name_or_path" in config else os.path.basename(dir_model)
+fout.add_name(model_name)
+if has_glm_projector:
+ fout.add_description("image encoder for glm4v")
+ fout.add_string("clip.projector_type", "adapter")
+else:
+ fout.add_description("two-tower CLIP model")
+
+if has_text_encoder:
+ assert t_hparams is not None
+ assert tokens is not None
+ # text_model hparams
+ fout.add_uint32(k(KEY_CONTEXT_LENGTH, TEXT), t_hparams["max_position_embeddings"])
+ fout.add_uint32(k(KEY_EMBEDDING_LENGTH, TEXT), t_hparams["hidden_size"])
+ fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, TEXT), t_hparams["intermediate_size"])
+ fout.add_uint32("clip.text.projection_dim", t_hparams.get("projection_dim", config["projection_dim"]))
+ fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, TEXT), t_hparams["num_attention_heads"])
+ fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, TEXT), t_hparams["layer_norm_eps"])
+ fout.add_uint32(k(KEY_BLOCK_COUNT, TEXT), t_hparams["num_hidden_layers"])
+ fout.add_token_list(tokens)
+
+if has_vision_encoder:
+ # vision_model hparams
+ fout.add_uint32("clip.vision.image_size", v_hparams["image_size"])
+ fout.add_uint32("clip.vision.patch_size", v_hparams["patch_size"])
+ fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), v_hparams["hidden_size"])
+ fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, VISION), v_hparams["intermediate_size"])
+ fout.add_uint32("clip.vision.projection_dim", 0)
+ fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, VISION), v_hparams["num_attention_heads"])
+ fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), 1e-6)
+ fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), v_hparams["num_hidden_layers"])
+
+ image_mean = args.image_mean if args.image_mean is not None else default_image_mean
+ image_std = args.image_std if args.image_std is not None else default_image_std
+ fout.add_array("clip.vision.image_mean", image_mean)
+ fout.add_array("clip.vision.image_std", image_std)
+
+fout.add_bool("clip.use_gelu", True)
+
+
+if has_glm_projector:
+ # model.vision_model.encoder.layers.pop(-1) # pyright: ignore[reportAttributeAccessIssue]
+ projector = torch.load(args.llava_projector)
+ for name, data in projector.items():
+ name = get_tensor_name(name)
+ # pw and dw conv ndim==4
+ if data.ndim == 2 or data.ndim == 4:
+ data = data.squeeze().numpy().astype(np.float16)
+ else:
+ data = data.squeeze().numpy().astype(np.float32)
+ if name.startswith("vision."):
+ name = name.replace("vision.", "")
+ fout.add_tensor(name, data)
+ print(f"Projector {name} - {data.dtype} - shape = {data.shape}")
+ # print(f"Projector {name} tensors added\n")
+
+state_dict = model.state_dict() # pyright: ignore[reportAttributeAccessIssue]
+for name, data in state_dict.items():
+ if should_skip_tensor(name, has_text_encoder, has_vision_encoder, has_glm_projector):
+ # we don't need this
+ print(f"skipping parameter: {name}")
+ continue
+
+ name = get_tensor_name(name)
+ data = data.squeeze().numpy()
+
+ n_dims = len(data.shape)
+
+ # ftype == 0 -> float32, ftype == 1 -> float16
+ ftype_cur = 0
+ if n_dims == 4:
+ print(f"tensor {name} is always saved in f16")
+ data = data.astype(np.float16)
+ ftype_cur = 1
+ elif ftype == 1:
+ if name[-7:] == ".weight" and n_dims == 2:
+ # print(" Converting to float16")
+ data = data.astype(np.float16)
+ ftype_cur = 1
+ else:
+ # print(" Converting to float32")
+ data = data.astype(np.float32)
+ ftype_cur = 0
+ else:
+ if data.dtype != np.float32:
+ # print(" Converting to float32")
+ data = data.astype(np.float32)
+ ftype_cur = 0
+ print(f"siglip {name} - {data.dtype} - shape = {data.shape}")
+ # print(f"{name} - {ftype_str[ftype_cur]} - shape = {data.shape}")
+ fout.add_tensor(name, data)
+
+
+fout.write_header_to_file()
+fout.write_kv_data_to_file()
+fout.write_tensors_to_file()
+fout.close()
+
+print("Done. Output file: " + fname_out)
--- /dev/null
+import argparse
+import os
+import torch
+from transformers import AutoModel
+
+ap = argparse.ArgumentParser()
+ap.add_argument("-m", "--model", help="Path to GLM model")
+args = ap.parse_args()
+
+# find the model part that includes the multimodal projector weights
+model = AutoModel.from_pretrained(args.model, trust_remote_code=True, local_files_only=True)
+checkpoint = model.state_dict()
+
+# get a list of mm tensor names
+mm_tensors = [k for k, v in checkpoint.items() if k.startswith("vision.adapter.")]
+
+# store these tensors in a new dictionary and torch.save them
+projector = {name: checkpoint[name].float() for name in mm_tensors}
+torch.save(projector, f"{args.model}/glm.projector")
+
+clip_tensors = [k for k, v in checkpoint.items() if k.startswith("vision.vit.model.vision_model.")]
+if len(clip_tensors) > 0:
+ clip = {name.replace("vision.vit.model.", ""): checkpoint[name].float() for name in clip_tensors}
+ torch.save(clip, f"{args.model}/glm.clip")
+
+ # added tokens should be removed to be able to convert Mistral models
+ if os.path.exists(f"{args.model}/added_tokens.json"):
+ with open(f"{args.model}/added_tokens.json", "w") as f:
+ f.write("{}\n")
+
+print("Done!")
+print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")
+print(f"Also, use {args.model}glm.projector to prepare a glm-encoder.gguf file.")
img_res_v.size = 0;
img_res_v.data = nullptr;
}
+ else if (clip_is_glm(ctx_clip)) {
+ struct clip_image_size * load_image_size = clip_image_size_init();
+ load_image_size->width = img_res_v.data[0].nx;
+ load_image_size->height = img_res_v.data[0].ny;
+ clip_add_load_image_size(ctx_clip, load_image_size);
+
+ bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[0], image_embd);
+ int pos = int(load_image_size->width/clip_patch_size(ctx_clip)/2);
+ *n_img_pos = (pos * pos + 2);
+ if (!encoded){
+ LOG_ERR("Unable to encode image \n");
+ return false;
+ }
+ }
else if (strcmp(mm_patch_merge_type, "spatial_unpad") != 0) {
// flat / default llava-1.5 type embedding
*n_img_pos = clip_n_patches(ctx_clip);
if (clip_is_minicpmv(ctx_clip)) {
num_max_patches = 10;
}
+ if (clip_is_glm(ctx_clip)) {
+ num_max_patches = 1;
+ }
float * image_embd;
if (clip_is_qwen2vl(ctx_clip)) {
// qwen2vl don't split image into chunks, so `num_max_patches` is not needed.
MODEL_TENSOR.OUTPUT,
MODEL_TENSOR.ATTN_NORM,
MODEL_TENSOR.ATTN_QKV,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
MODEL_TENSOR.ATTN_OUT,
MODEL_TENSOR.FFN_NORM,
MODEL_TENSOR.FFN_DOWN,
{ LLM_TENSOR_OUTPUT, "output" },
{ LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
{ LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
{ LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
{ LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
{ "llama3", LLM_CHAT_TEMPLATE_LLAMA_3 },
{ "chatglm3", LLM_CHAT_TEMPLATE_CHATGML_3 },
{ "chatglm4", LLM_CHAT_TEMPLATE_CHATGML_4 },
+ { "glmedge", LLM_CHAT_TEMPLATE_GLMEDGE },
{ "minicpm", LLM_CHAT_TEMPLATE_MINICPM },
{ "exaone3", LLM_CHAT_TEMPLATE_EXAONE_3 },
{ "rwkv-world", LLM_CHAT_TEMPLATE_RWKV_WORLD },
} else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) {
return LLM_CHAT_TEMPLATE_PHI_3;
} else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
- return LLM_CHAT_TEMPLATE_FALCON_3;
+ return tmpl_contains("</s>") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE;
} else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) {
return LLM_CHAT_TEMPLATE_ZEPHYR;
} else if (tmpl_contains("bos_token + message['role']")) {
if (add_ass) {
ss << "<|assistant|>";
}
+ } else if (tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) {
+ for (auto message : chat) {
+ std::string role(message->role);
+ ss << "<|" << role << "|>" << "\n" << message->content;
+ }
+ if (add_ass) {
+ ss << "<|assistant|>";
+ }
} else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) {
// MiniCPM-3B-OpenHermes-2.5-v2-GGUF
for (auto message : chat) {
LLM_CHAT_TEMPLATE_LLAMA_3,
LLM_CHAT_TEMPLATE_CHATGML_3,
LLM_CHAT_TEMPLATE_CHATGML_4,
+ LLM_CHAT_TEMPLATE_GLMEDGE,
LLM_CHAT_TEMPLATE_MINICPM,
LLM_CHAT_TEMPLATE_EXAONE_3,
LLM_CHAT_TEMPLATE_RWKV_WORLD,
{
ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
switch (hparams.n_layer) {
- case 28: type = LLM_TYPE_6B; break;
- case 40: type = LLM_TYPE_9B; break;
+ case 28: {
+ if (hparams.n_head(0) == 16) {
+ type = LLM_TYPE_1_5B;
+ } else {
+ type = LLM_TYPE_6B;
+ }
+ } break;
+ case 40: {
+ if (hparams.n_head(0) == 24) {
+ type = LLM_TYPE_4B;
+ } else {
+ type = LLM_TYPE_9B;
+ }
+ } break;
default: type = LLM_TYPE_UNKNOWN;
}
} break;
auto & layer = layers[i];
layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+ layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
+ layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
- layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
- layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0);
+ if (layer.wqkv == nullptr) {
+ layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+ layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0);
+ layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0);
+ layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
+ layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
+ layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
+ }
layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
struct ggml_tensor * Qcur = nullptr;
struct ggml_tensor * Kcur = nullptr;
struct ggml_tensor * Vcur = nullptr;
-
- cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
- cb(cur, "wqkv", il);
-
- cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
- cb(cur, "bqkv", il);
-
- Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
- Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
- Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
-
+ if (model.type == LLM_TYPE_1_5B || model.type == LLM_TYPE_4B || model.type == LLM_TYPE_9B) {
+ Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
+ if (model.layers[il].bq) {
+ Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+ }
+ Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
+ if (model.layers[il].bk) {
+ Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+ }
+ Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
+ if (model.layers[il].bv) {
+ Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+ }
+ } else {
+ cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur);
+ cb(cur, "wqkv", il);
+ if (model.layers[il].bqkv) {
+ cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
+ cb(cur, "bqkv", il);
+ }
+ Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+ Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+ Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
+ }
cb(Qcur, "Qcur", il);
cb(Kcur, "Kcur", il);
cb(Vcur, "Vcur", il);
/* .bos_token= */ "",
/* .eos_token= */ "",
},
+ {
+ /* .name= */ "GLMEdge",
+ /* .template_str= */ "{% for item in messages %}{% if item['role'] == 'system' %}<|system|>\n{{ item['content'] }}{% elif item['role'] == 'user' %}<|user|>\n{{ item['content'] }}{% elif item['role'] == 'assistant' %}<|assistant|>\n{{ item['content'] }}{% endif %}{% endfor %}<|assistant|>",
+ /* .expected_output= */ "<|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>",
+ /* .expected_output_jinja= */ "<|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>",
+ /* .bos_token= */ "",
+ /* .eos_token= */ "",
+ },
{
/* .name= */ "MiniCPM-3B-OpenHermes-2.5-v2-GGUF",
/* .template_str= */ u8"{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + '<AI>'}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}",