llama : use std::abs instead of abs (#16853)
author      Jan Boon <redacted>
            Thu, 30 Oct 2025 06:30:58 +0000 (14:30 +0800)
committer   GitHub <redacted>
            Thu, 30 Oct 2025 06:30:58 +0000 (08:30 +0200)
src/llama-graph.cpp
src/llama-quant.cpp

diff --git a/src/llama-graph.cpp b/src/llama-graph.cpp
index 112d195f2911e8d5e49552814e50d10ded933242..f9751b318369461b4fbaf53b708c1973f1a093dc 100644
--- a/src/llama-graph.cpp
+++ b/src/llama-graph.cpp
@@ -2035,7 +2035,7 @@ int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buck
 
     if (bidirectional) {
         relative_bucket += (relative_position > 0) * n_buckets;
-        relative_position = abs(relative_position);
+        relative_position = std::abs(relative_position);
     } else {
         relative_position = -std::min<int32_t>(relative_position, 0);
     }
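Note on the llama-graph.cpp hunk: the argument here is relative_position, an int32_t, so both spellings return the same value; the switch to std::abs keeps the call consistent with the std:: usage around it and lets overload resolution pick the signature from the argument type. A minimal sketch, using a hypothetical helper name that is not part of llama.cpp:

    // Hypothetical sketch, not from llama.cpp: for a 32-bit argument the
    // std::abs(int) overload yields the same result as the C abs().
    #include <cstdint>
    #include <cstdlib>

    static int32_t bucket_distance(int32_t relative_position) {
        return std::abs(relative_position); // std::abs(int) overload selected
    }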
diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 6dd40412b488ee947ac1f9bf68fad9f1180946de..a56b2626ae1c5f872ec8cc2a4462f98d63e91e26 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -653,7 +653,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
                 gguf_set_val_f32(ctx_out.get(), o.key, o.val_f64);
             } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
                 // Setting type to UINT32. See https://github.com/ggml-org/llama.cpp/pull/14182 for context
-                gguf_set_val_u32(ctx_out.get(), o.key, (uint32_t)abs(o.val_i64));
+                gguf_set_val_u32(ctx_out.get(), o.key, (uint32_t)std::abs(o.val_i64));
             } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
                 gguf_set_val_bool(ctx_out.get(), o.key, o.val_bool);
             } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) {
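Note on the llama-quant.cpp hunk: this is where the choice can actually matter, because o.val_i64 is an int64_t. If the unqualified call binds to the C abs(int), the argument is silently narrowed to int before the absolute value is taken, whereas std::abs in <cstdlib> provides 64-bit overloads. A small standalone sketch of that narrowing pitfall, using an illustrative value that is not from the commit:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    int main() {
        int64_t v = -4294967298LL;   // magnitude does not fit in 32 bits
        // std::abs resolves to the 64-bit overload, so the value is preserved.
        printf("std::abs(int64_t): %lld\n", (long long) std::abs(v));
        // If the C abs(int) were selected instead, the argument would first be
        // narrowed to int; forcing that narrowing shows the information loss.
        int narrowed = (int) v;
        printf("abs after narrowing: %d\n", std::abs(narrowed));
        return 0;
    }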