From: CausalLM
Date: Tue, 27 Aug 2024 06:58:22 +0000 (+0800)
Subject: llama : fix ChatGLM4 wrong shape (#9194)
X-Git-Tag: upstream/0.0.4488~853
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=2e59d61c1b321431c597c1f12249273427d5640d;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : fix ChatGLM4 wrong shape (#9194)

This should fix THUDM/glm-4-9b-chat-1m and CausalLM/miniG
---

diff --git a/src/llama.cpp b/src/llama.cpp
index 7c148b83..f5097224 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -8116,8 +8116,8 @@ static bool llm_load_tensors(

                     layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

-                    layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + (hparams.n_embd_head_k << 2)});
-                    layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + (hparams.n_embd_head_k << 2)});
+                    layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
+                    layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa});

                     layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
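
For context (not part of the patch): the fused QKV tensor holds Q, K and V side by side, so its width is n_embd for Q plus one GQA-sized block each for K and V, i.e. n_embd + 2*n_embd_gqa. The old expression (hparams.n_embd_head_k << 2) hard-codes a factor of 4, which only matches 2*n_embd_gqa when the model happens to have exactly two KV heads. Below is a minimal standalone sketch of that arithmetic; the hyperparameter values are hypothetical, chosen only to show the two expressions diverging, and are not taken from any specific GLM4 checkpoint.

// sketch.cpp — illustrates the fused QKV width arithmetic, not llama.cpp code
#include <cstdio>

int main() {
    const int n_embd        = 4096;            // hidden size (hypothetical)
    const int n_head        = 32;              // attention heads (hypothetical)
    const int n_head_kv     = 4;               // KV heads (hypothetical, != 2)
    const int n_embd_head_k = n_embd / n_head;            // per-head dim = 128
    const int n_embd_gqa    = n_embd_head_k * n_head_kv;   // K (or V) width = 512

    // old shape: fixed factor of 4, correct only if n_head_kv == 2
    const int old_width = n_embd + (n_embd_head_k << 2);
    // new shape: Q width + K width + V width
    const int new_width = n_embd + 2 * n_embd_gqa;

    printf("old fused QKV width: %d\n", old_width); // 4608
    printf("new fused QKV width: %d\n", new_width); // 5120
    return 0;
}

With these assumed values the old formula under-sizes the tensor, which is the kind of shape mismatch the patch addresses for checkpoints whose KV-head count differs from the hard-coded assumption.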