From: Gilad S.
Date: Mon, 1 Dec 2025 20:21:13 +0000 (+0200)
Subject: fix: llama arch implementation (#17665)
X-Git-Tag: upstream/0.0.7446~227
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=00c361fe53e5fc105a077f90a0a22d4c60936ffe;p=pkg%2Fggml%2Fsources%2Fllama.cpp

fix: llama arch implementation (#17665)
---

diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 584efbf3..c46ee370 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -626,6 +626,8 @@ void llama_model::load_hparams(llama_model_loader & ml) {
     switch (arch) {
         case LLM_ARCH_LLAMA:
             {
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
                 if (hparams.n_expert == 8) {
                     switch (hparams.n_layer) {
                         case 32: type = LLM_TYPE_8x7B; break;
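
Editor's note (not part of the upstream commit): the added ml.get_key() call reads the attention RMS-norm epsilon from the model's GGUF metadata into hparams.f_norm_rms_eps for the LLM_ARCH_LLAMA case, the value llama.cpp later feeds to ggml's RMS-norm op when building the compute graph. Below is a minimal standalone sketch of the math this epsilon controls; it assumes a plain C++ loop rather than the actual graph op (ggml_rms_norm), and rms_norm_row is a hypothetical helper name, not a llama.cpp function:

// Sketch only -- not the llama.cpp implementation. llama.cpp performs RMS
// normalization via the ggml graph op ggml_rms_norm(ctx, x, eps); this loop
// just illustrates the formula that hparams.f_norm_rms_eps feeds into:
//     y_i = x_i / sqrt(mean(x^2) + eps) * w_i
#include <cmath>
#include <cstddef>

// Hypothetical helper for illustration: RMS-normalize one row of n floats
// in place, applying a per-channel learned scale w.
static void rms_norm_row(float * x, const float * w, size_t n, float eps) {
    float sum_sq = 0.0f;
    for (size_t i = 0; i < n; ++i) {
        sum_sq += x[i] * x[i];
    }
    // eps keeps the denominator away from zero for near-zero activations.
    const float inv_rms = 1.0f / std::sqrt(sum_sq / (float) n + eps);
    for (size_t i = 0; i < n; ++i) {
        x[i] = x[i] * inv_rms * w[i];
    }
}

Reading the key inside the per-architecture switch of load_hparams() matches how the other LLM_ARCH_* cases in the same function populate hparams, which is presumably why the fix lands here rather than in shared loading code.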