git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
ggml : add and use ggml_cpu_has_llamafile() (#8664)
author: Georgi Gerganov <redacted>
Thu, 25 Jul 2024 09:37:42 +0000 (12:37 +0300)
committer: GitHub <redacted>
Thu, 25 Jul 2024 09:37:42 +0000 (12:37 +0300)
ggml/include/ggml.h
ggml/src/ggml.c
src/llama.cpp

index 2fdb9fa40274bce1d74e2e2bda9a5f223f589441..548661b9bb6368f495cbedace3e9d0f48c1a6085 100644 (file)
@@ -2400,6 +2400,7 @@ extern "C" {
     GGML_API int ggml_cpu_has_vsx        (void);
     GGML_API int ggml_cpu_has_matmul_int8(void);
     GGML_API int ggml_cpu_has_cann       (void);
+    GGML_API int ggml_cpu_has_llamafile  (void);
 
     //
     // Internal types and functions exposed for tests and benchmarks
index dbb3a3ebe1cca45465fcbd63a9d78402c4b5d71c..f65837e856ac35f12fa91ded964f64de1ad0e462 100644 (file)
@@ -22005,6 +22005,14 @@ int ggml_cpu_has_cann(void) {
 #endif
 }
 
+int ggml_cpu_has_llamafile(void) {
+#if defined(GGML_USE_LLAMAFILE)
+    return 1;
+#else
+    return 0;
+#endif
+}
+
 int ggml_cpu_has_gpublas(void) {
     return ggml_cpu_has_cuda() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() || ggml_cpu_has_sycl();
 }
index 9e502018dfb764720bea8701b9f9d73a227da9b6..80235ae19b27053f9ecdce68a591590668e30067 100644 (file)
@@ -19146,11 +19146,7 @@ const char * llama_print_system_info(void) {
     s += "SSSE3 = "       + std::to_string(ggml_cpu_has_ssse3())       + " | ";
     s += "VSX = "         + std::to_string(ggml_cpu_has_vsx())         + " | ";
     s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
-#ifdef GGML_USE_LLAMAFILE
-    s += "LLAMAFILE = 1 | ";
-#else
-    s += "LLAMAFILE = 0 | ";
-#endif
+    s += "LLAMAFILE = "   + std::to_string(ggml_cpu_has_llamafile())   + " | ";
 
     return s.c_str();
 }