git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Update docs for yarn_ext_factor <0.0 as unspecified instead of NaN (#4189)
author: crasm <redacted>
Sat, 25 Nov 2023 15:47:07 +0000 (10:47 -0500)
committer: GitHub <redacted>
Sat, 25 Nov 2023 15:47:07 +0000 (10:47 -0500)
convert.py [changed mode: 0644->0755]
llama.h

old mode 100644 (file)
new mode 100755 (executable)
diff --git a/llama.h b/llama.h
index 1a62058d1406bc32e9a97f1dc05c53038ad2ecf5..89cb6198e84b8c3e9c72abafdb00331460e8072f 100644 (file)
--- a/llama.h
+++ b/llama.h
@@ -185,7 +185,7 @@ extern "C" {
         // ref: https://github.com/ggerganov/llama.cpp/pull/2054
         float    rope_freq_base;   // RoPE base frequency, 0 = from model
         float    rope_freq_scale;  // RoPE frequency scaling factor, 0 = from model
-        float    yarn_ext_factor;  // YaRN extrapolation mix factor, NaN = from model
+        float    yarn_ext_factor;  // YaRN extrapolation mix factor, negative = from model
         float    yarn_attn_factor; // YaRN magnitude scaling factor
         float    yarn_beta_fast;   // YaRN low correction dim
         float    yarn_beta_slow;   // YaRN high correction dim