From: ddh0
Date: Wed, 4 Mar 2026 08:53:38 +0000 (-0600)
Subject: impl : use 6 digits for tensor dims (#20094)
X-Git-Tag: upstream/0.0.8611~415
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=c99909dd0b6589070e2a129fa172c40fa1675453;p=pkg%2Fggml%2Fsources%2Fllama.cpp

impl : use 6 digits for tensor dims (#20094)

Many models have vocabulary sizes, and thus tensor shapes, with more
than 5 digits (ex: Gemma 3's vocab size is 262,208). I already fixed
this for one overload of `llama_format_tensor_shape` but missed the
`std::vector` overload until now. Oops.
---

diff --git a/src/llama-impl.cpp b/src/llama-impl.cpp
index 710a5a1e0..4c0188ee7 100644
--- a/src/llama-impl.cpp
+++ b/src/llama-impl.cpp
@@ -100,9 +100,9 @@ std::string format(const char * fmt, ...) {
 
 std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
     char buf[256];
-    snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
+    snprintf(buf, sizeof(buf), "%6" PRId64, ne.at(0));
     for (size_t i = 1; i < ne.size(); i++) {
-        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
+        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %6" PRId64, ne.at(i));
     }
     return buf;
 }