)
self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
- # sanity check
- attn_scalar = self.hparams["query_pre_attn_scalar"]
- if attn_scalar != hparams["hidden_size"] / hparams["num_attention_heads"]:
- raise ValueError("query_pre_attn_scalar must be equal to n_embd / n_head")
-
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
del bid # unused
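
Not part of the change itself, just a sketch of why the converter-side sanity check was dropped: the two Gemma 2 variants disagree on what `query_pre_attn_scalar` equals. Assuming the published HF config values (hidden_size, num_attention_heads, head_dim, query_pre_attn_scalar as found in the public checkpoints), 9B sets the scalar to head_dim while 27B sets it to hidden_size / num_attention_heads, so a single equality check cannot hold for both; the per-model scaling is handled in llama.cpp instead, as in the hunk below.

```python
# Minimal sketch (config values assumed from the public Gemma 2 checkpoints,
# not read from this repo): the old check only holds for 27B, not for 9B.
configs = {
    "gemma-2-9b":  {"hidden_size": 3584, "num_attention_heads": 16, "head_dim": 256, "query_pre_attn_scalar": 256},
    "gemma-2-27b": {"hidden_size": 4608, "num_attention_heads": 32, "head_dim": 128, "query_pre_attn_scalar": 144},
}

for name, hp in configs.items():
    n_embd_per_head = hp["hidden_size"] / hp["num_attention_heads"]  # what the old sanity check compared against
    q_scale = hp["query_pre_attn_scalar"] ** -0.5                    # 1 / sqrt(query_pre_attn_scalar)
    print(f"{name}: scalar == hidden/heads? {hp['query_pre_attn_scalar'] == n_embd_per_head}, "
          f"scalar == head_dim? {hp['query_pre_attn_scalar'] == hp['head_dim']}, q_scale = {q_scale:.6f}")
```
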
ext_factor, attn_factor, beta_fast, beta_slow);
cb(Qcur, "Qcur", il);
- Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head)));
+ // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
+ switch (model.type) {
+ case e_model::MODEL_9B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k))); break;
+ case e_model::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
+ default: GGML_ASSERT(false);
+ };
cb(Qcur, "Qcur_scaled", il);
Kcur = ggml_rope_ext(