git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : fix chat template gguf key (#11201)
author: Xuan Son Nguyen <redacted>
Sun, 12 Jan 2025 12:45:14 +0000 (13:45 +0100)
committer: GitHub <redacted>
Sun, 12 Jan 2025 12:45:14 +0000 (13:45 +0100)
common/common.cpp
src/llama-arch.cpp

index 39bfb0c2e2dbc4f71868353f02d50ae458113bd5..1a2e1524799d3e540ecb0fb0e9ee52a5a9904d13 100644 (file)
@@ -1636,15 +1636,8 @@ std::string common_detokenize(const struct llama_vocab * vocab, const std::vecto
 //
 
 std::string common_get_builtin_chat_template(const struct llama_model * model) {
-    static const char * template_key = "tokenizer.chat_template";
-    // call with NULL buffer to get the total size of the string
-    int32_t res = llama_model_meta_val_str(model, template_key, NULL, 0);
-    if (res > 0) {
-        std::vector<char> model_template(res + 1, 0);
-        llama_model_meta_val_str(model, template_key, model_template.data(), model_template.size());
-        return std::string(model_template.data(), model_template.size() - 1);
-    }
-    return "";
+    const char * ptr_tmpl = llama_model_chat_template(model);
+    return ptr_tmpl == nullptr ? "" : ptr_tmpl;
 }
 
 bool common_chat_verify_template(const std::string & tmpl) {
index 5c1f14cfdef53f170d6d6b076a764bf8131d81b3..d7d277e72977afcb96a32fc7027a71147d4a397f 100644 (file)
@@ -178,7 +178,7 @@ static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap"     },
     { LLM_KV_TOKENIZER_HF_JSON,              "tokenizer.huggingface.json"              },
     { LLM_KV_TOKENIZER_RWKV,                 "tokenizer.rwkv.world"                    },
-    { LLM_KV_TOKENIZER_CHAT_TEMPLATE,        "tokenizer.chat.template"                 },
+    { LLM_KV_TOKENIZER_CHAT_TEMPLATE,        "tokenizer.chat_template"                 },
     { LLM_KV_TOKENIZER_FIM_PRE_ID,           "tokenizer.ggml.fim_pre_token_id"         },
     { LLM_KV_TOKENIZER_FIM_SUF_ID,           "tokenizer.ggml.fim_suf_token_id"         },
     { LLM_KV_TOKENIZER_FIM_MID_ID,           "tokenizer.ggml.fim_mid_token_id"         },