hparams : initialize arrays (#13728)
author    Georgi Gerganov <redacted>
Fri, 23 May 2025 17:16:13 +0000 (20:16 +0300)
committer GitHub <redacted>
Fri, 23 May 2025 17:16:13 +0000 (20:16 +0300)
ggml-ci

src/llama-hparams.cpp
src/llama-hparams.h
src/llama-model.cpp

diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp
index ac05db46e69ae44699b86c948f07ed5ca7cbabcc..1499eb08a5dd9246f182dc3545d4e23aecc5ca29 100644 (file)
--- a/src/llama-hparams.cpp
+++ b/src/llama-hparams.cpp
@@ -2,10 +2,6 @@
 
 #include "ggml.h"
 
-llama_hparams::llama_hparams() {
-    swa_layers.fill(false);
-}
-
 void llama_hparams::set_swa_pattern(uint32_t n_pattern) {
     for (uint32_t il = 0; il < n_layer; ++il) {
         swa_layers[il] = n_pattern == 0 || (il % n_pattern < (n_pattern - 1));
diff --git a/src/llama-hparams.h b/src/llama-hparams.h
index fb638dce7fd259991f3cdd6a0a2796b74c6cdaaa..2d72eab180ad0c93cb797e194d73ff38e11547fd 100644 (file)
--- a/src/llama-hparams.h
+++ b/src/llama-hparams.h
@@ -145,8 +145,6 @@ struct llama_hparams {
     enum llama_rope_type         rope_type               = LLAMA_ROPE_TYPE_NONE;
     enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
 
-    llama_hparams();
-
     // this value n_pattern means that every nth layer is dense (i.e. non-SWA)
     // note that if n_pattern == 0, all layers are SWA
     //           if n_pattern == 1, all layers are dense
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 227a1bcbaf0eb6355520e002fac3e42698b074ef..81b052e1b1a47c6856fe04bcaec36cba21d7176b 100644 (file)
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -463,11 +463,14 @@ void llama_model::load_hparams(llama_model_loader & ml) {
         GGML_ASSERT(hparams.n_expert_used == 0);
     }
 
-    // zero-out the array hparams
     std::fill(hparams.n_head_arr.begin(),    hparams.n_head_arr.end(),    0);
     std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
     std::fill(hparams.n_ff_arr.begin(),      hparams.n_ff_arr.end(),      0);
 
+    std::fill(hparams.rope_sections.begin(), hparams.rope_sections.end(), 0);
+
+    std::fill(hparams.swa_layers.begin(), hparams.swa_layers.end(), 0);
+
     ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH,  hparams.n_ff_arr,   hparams.n_layer, false);
     ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false);
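
For context, the commit drops the llama_hparams constructor (which only filled swa_layers) and instead zeroes rope_sections and swa_layers with explicit std::fill calls in load_hparams, alongside the other per-layer arrays. The standalone sketch below is not part of the commit; it uses a hypothetical struct and array size modeled on the diff to illustrate why the fills matter: a std::array member of a plain struct is left with indeterminate values after default construction.

#include <algorithm>
#include <array>
#include <cstdint>
#include <cstdio>

// hypothetical stand-in for llama_hparams: std::array members, no constructor
struct hparams_sketch {
    std::array<uint32_t, 8> n_head_arr;   // modeled on llama_hparams::n_head_arr
    std::array<bool,     8> swa_layers;   // modeled on llama_hparams::swa_layers
};

int main() {
    hparams_sketch hp;  // default construction leaves the array elements indeterminate

    // zero the arrays explicitly, mirroring the std::fill calls added in load_hparams()
    std::fill(hp.n_head_arr.begin(), hp.n_head_arr.end(), 0);
    std::fill(hp.swa_layers.begin(), hp.swa_layers.end(), false);

    for (uint32_t v : hp.n_head_arr) {
        std::printf("%u ", v);  // prints zeros only because of the fill above
    }
    std::printf("\n");
    return 0;
}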