git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : fix ChatGLM4 wrong shape (#9194)
author: CausalLM <redacted>
Tue, 27 Aug 2024 06:58:22 +0000 (14:58 +0800)
committer: GitHub <redacted>
Tue, 27 Aug 2024 06:58:22 +0000 (09:58 +0300)
This should fix THUDM/glm-4-9b-chat-1m and CausalLM/miniG

src/llama.cpp

index 7c148b8305e1e6ca2bb28153105dc2d90893e0a5..f50972249baa7d5325025f88a13902e025e252f7 100644 (file)
@@ -8116,8 +8116,8 @@ static bool llm_load_tensors(
 
                         layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
 
-                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + (hparams.n_embd_head_k << 2)});
-                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + (hparams.n_embd_head_k << 2)});
+                        layer.wqkv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa});
+                        layer.bqkv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa});
 
                         layer.wo   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});