git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
convert-hf : Handle NotImplementedError in convert-hf-to-gguf (#7660)
author: Galunid <redacted>
Fri, 31 May 2024 15:42:33 +0000 (17:42 +0200)
committer: GitHub <redacted>
Fri, 31 May 2024 15:42:33 +0000 (17:42 +0200)
convert-hf-to-gguf.py

index 9f29cda234e42b7d1a1823b306c5dfa397f5ffbb..ad071b97404f7bb7b7357187fb1b1da8d9d93016 100755 (executable)
@@ -2840,7 +2840,12 @@ def main() -> None:
     hparams = Model.load_hparams(dir_model)
 
     with torch.inference_mode():
-        model_class = Model.from_model_architecture(hparams["architectures"][0])
+        try:
+            model_class = Model.from_model_architecture(hparams["architectures"][0])
+        except NotImplementedError:
+            logger.error(f"Model {hparams['architectures'][0]} is not supported")
+            sys.exit(1)
+
         model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian, args.use_temp_file, args.no_lazy)
 
         logger.info("Set model parameters")