git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama-graph: replace cont with reshape for alpha in qwen35 (#20640)
authorAman Gupta <redacted>
Mon, 16 Mar 2026 14:07:13 +0000 (22:07 +0800)
committerGitHub <redacted>
Mon, 16 Mar 2026 14:07:13 +0000 (22:07 +0800)
src/models/qwen35.cpp
src/models/qwen35moe.cpp

index 3108bf331ac95633478c070400e847d2ceb6d340..d07579ee87e2c0eadd73b61817aa9b861e4f2456 100644 (file)
@@ -224,7 +224,7 @@ ggml_tensor * llm_build_qwen35::build_layer_attn_linear(
     beta = ggml_sigmoid(ctx0, beta);
 
     ggml_tensor * alpha = build_lora_mm(model.layers[il].ssm_alpha, cur, model.layers[il].ssm_alpha_s);
-    alpha = ggml_cont_3d(ctx0, alpha, num_v_heads, n_seq_tokens, n_seqs);
+    alpha = ggml_reshape_3d(ctx0, alpha, num_v_heads, n_seq_tokens, n_seqs);
     cb(alpha, "alpha", il);
 
     ggml_tensor * alpha_biased   = ggml_add(ctx0, alpha, model.layers[il].ssm_dt);
index 165e2412e560b913ccf5f665f3786cd88e0c1242..b38660c0bce9c7d187bb7f5a476d959cb717bb05 100644 (file)
@@ -224,7 +224,7 @@ ggml_tensor * llm_build_qwen35moe::build_layer_attn_linear(
     beta = ggml_sigmoid(ctx0, beta);
 
     ggml_tensor * alpha = build_lora_mm(model.layers[il].ssm_alpha, cur, model.layers[il].ssm_alpha_s);
-    alpha = ggml_cont_3d(ctx0, alpha, num_v_heads, n_seq_tokens, n_seqs);
+    alpha = ggml_reshape_3d(ctx0, alpha, num_v_heads, n_seq_tokens, n_seqs);
     cb(alpha, "alpha", il);
 
     ggml_tensor * alpha_biased   = ggml_add(ctx0, alpha, model.layers[il].ssm_dt);