git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Don't crash on ftype (formerly f16) == 4 (#917)
author: Stephan Walter <redacted>
Wed, 12 Apr 2023 15:06:16 +0000 (15:06 +0000)
committer: GitHub <redacted>
Wed, 12 Apr 2023 15:06:16 +0000 (15:06 +0000)
llama.cpp
llama.h

index 653558be94885eb1d58c81aee7b84845afdddbd6..6d8b706b982587893e578c0131f684c816eda0de 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -827,7 +827,9 @@ static const char *llama_ftype_name(enum llama_ftype ftype) {
         case LLAMA_FTYPE_MOSTLY_F16:  return "mostly F16";
         case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
         case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
-        default: LLAMA_ASSERT(false);
+        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
+                                      return "mostly Q4_1, some F16";
+        default:                      return "unknown, may not work";
     }
 }
 
diff --git a/llama.h b/llama.h
index 8a0d50fb80cec630e15bd5f4296f3e87b0eca66e..7a258a1e16d35127b2f66f90171f8fbbca4821d6 100644 (file)
--- a/llama.h
+++ b/llama.h
@@ -71,6 +71,7 @@ extern "C" {
         LLAMA_FTYPE_MOSTLY_F16  = 1,  // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q4_0 = 2,  // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q4_1 = 3,  // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
     };
 
     LLAMA_API struct llama_context_params llama_context_default_params();