git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : make load error reporting more granular (#5477)
author Aarni Koskela <redacted>
Tue, 13 Feb 2024 13:24:50 +0000 (15:24 +0200)
committer GitHub <redacted>
Tue, 13 Feb 2024 13:24:50 +0000 (15:24 +0200)
Makes it easier to pinpoint where e.g. `unordered_map::at: key not found` comes from.

llama.cpp

index 381a030683cb54a9154a52146d8c39f8957651c9..61c695187def84d091b699a85083f7222b76e8a4 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -4384,9 +4384,21 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
 
         model.hparams.vocab_only = params.vocab_only;
 
-        llm_load_arch   (ml, model);
-        llm_load_hparams(ml, model);
-        llm_load_vocab  (ml, model);
+        try {
+            llm_load_arch(ml, model);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
+        }
+        try {
+            llm_load_hparams(ml, model);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
+        }
+        try {
+            llm_load_vocab(ml, model);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
+        }
 
         llm_load_print_meta(ml, model);