minor : add const qualifiers (#2853)
author m3ndax <redacted>
Fri, 1 Sep 2023 13:47:27 +0000 (15:47 +0200)
committer GitHub <redacted>
Fri, 1 Sep 2023 13:47:27 +0000 (16:47 +0300)
* made the methods const

# Conflicts:
# examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp

* made the method const

* Update convert-llama2c-to-ggml.cpp

removed write_raw and write_u32

* llama2c : remove misleading const

---------

Co-authored-by: Georgi Gerganov <redacted>
examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
llama.cpp

index 0b03c9d2b46186db8cd96432c3d4bdf4b29f169e..0ee7adc523a34cbe03777786ba494218d8e9c78c 100644 (file)
--- a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
+++ b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -637,7 +637,7 @@ void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab)
     }
 }
 
-void stuff_karpathy_weights_into_gg(struct ggml_tensor * gg_weights, float * karpathy_weights){
+void stuff_karpathy_weights_into_gg(struct ggml_tensor * gg_weights, const float * karpathy_weights) {
     int ct;
     switch (gg_weights->n_dims){
         case 1:
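
The only change in this hunk is the parameter type: the source weights are now taken as const float *. A minimal, self-contained sketch of what that qualifier buys (the copy_weights helper below is hypothetical, not code from the repository):

#include <cstddef>

// Taking the source as `const float *` documents that the function only reads
// the weights; any write through `src` is rejected at compile time.
static void copy_weights(float * dst, const float * src, size_t n) {
    for (size_t i = 0; i < n; ++i) {
        dst[i] = src[i];      // writing through the non-const dst is fine
        // src[i] = 0.0f;     // would not compile: src points to const float
    }
}

Callers holding read-only weight buffers can pass them to such a function without a cast.
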
index 5ca119238777eb571f6447caccbe6890031cf26b..23b251caf385396c6cac924497601c62186ffce4 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -4393,7 +4393,7 @@ struct llama_logit_info {
         }
         return min_heap;
     }
-    float probability_from_logit(float logit) {
+    float probability_from_logit(float logit) const {
         return normalizer * std::exp(logit - max_l);
     }
 };
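
Marking probability_from_logit as const means it can be called on a const llama_logit_info (for example through a const reference), and the compiler guarantees it does not modify normalizer or max_l. A minimal, self-contained sketch of the pattern, using a hypothetical struct name that mirrors the fields seen above:

#include <cmath>

struct logit_info_sketch {
    float normalizer;
    float max_l;
    // const member function: may read but not modify the struct's fields.
    float probability_from_logit(float logit) const {
        return normalizer * std::exp(logit - max_l);
    }
};

// Callable through a const reference, which a non-const member function would reject.
static float prob(const logit_info_sketch & info, float logit) {
    return info.probability_from_logit(logit);
}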