git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : clarify nemotron-h.cpp comment about RoPE [no ci] (#18997)
author: Daniel Bevenius <redacted>
Wed, 21 Jan 2026 17:31:34 +0000 (18:31 +0100)
committer: GitHub <redacted>
Wed, 21 Jan 2026 17:31:34 +0000 (18:31 +0100)
This commit removes the mention of RoPE in the comment for the Q and K
computation as RoPE is not applied.

src/models/nemotron-h.cpp

index eb135e63f1881af0091ba6805e7895b77c5ce183..079c730ac292ed9596c79d6aec38ab57d54df45b 100644 (file)
@@ -67,7 +67,7 @@ ggml_tensor * llm_build_nemotron_h::build_attention_layer(ggml_tensor *
                                                           const llama_model &       model,
                                                           const int64_t             n_embd_head,
                                                           const int                 il) {
-    // compute Q and K and (optionally) RoPE them
+    // compute Q and K
     ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
     cb(Qcur, "Qcur", il);
     if (model.layers[il].bq) {