cmake : restore LLAMA_LLAMAFILE_DEFAULT
author     Georgi Gerganov <redacted>
           Thu, 25 Apr 2024 18:31:17 +0000 (21:31 +0300)
committer  Georgi Gerganov <redacted>
           Thu, 25 Apr 2024 18:37:27 +0000 (21:37 +0300)
CMakeLists.txt
llama.cpp

index 425100ff8035ab735a94cfe7a84642ae36fcea3f..477c5b57c20e7deb7fdfebd277c5ad05d1c7b397 100644 (file)
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -43,6 +43,8 @@ else()
     set(LLAMA_METAL_DEFAULT OFF)
 endif()
 
+set(LLAMA_LLAMAFILE_DEFAULT ON)
+
 # general
 option(BUILD_SHARED_LIBS                "build shared libraries"                                OFF)
 option(LLAMA_STATIC                     "llama: static link libraries"                          OFF)
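For context, the restored LLAMA_LLAMAFILE_DEFAULT value is presumably consumed further down in CMakeLists.txt to seed the user-facing option and, when that option is enabled, to define the GGML_USE_LLAMAFILE flag checked in llama.cpp below. A minimal sketch of that pattern (paraphrased, not part of this diff; the exact option description and placement may differ):

    # seed the user-facing option from the default restored above
    option(LLAMA_LLAMAFILE "llama: use llamafile SGEMM kernels" ${LLAMA_LLAMAFILE_DEFAULT})

    if (LLAMA_LLAMAFILE)
        # expose the choice to the C++ sources as a preprocessor definition
        add_compile_definitions(GGML_USE_LLAMAFILE)
    endif()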
index 0c01a353999b3ca8314bcbf4f371c523f6f8d4b1..d728bd499d38f7084bc5dc1b27df76880ed1e8a5 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -17653,6 +17653,11 @@ const char * llama_print_system_info(void) {
     s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
     s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";
     s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
+#ifdef GGML_USE_LLAMAFILE
+    s += "LAMMAFILE = 1 | ";
+#else
+    s += "LAMMAFILE = 0 | ";
+#endif
 
     return s.c_str();
 }
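With the option enabled at build time (i.e. GGML_USE_LLAMAFILE defined), the string returned by llama_print_system_info() would then report the new flag alongside the existing feature flags, e.g. "... MATMUL_INT8 = 0 | LLAMAFILE = 1 | " (the feature values shown here are illustrative).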