llama : allow using mmap without PrefetchVirtualMemory, apply GGML_WIN_VER to llama...
author     Diego Devesa <redacted>
           Thu, 5 Jun 2025 09:57:42 +0000 (02:57 -0700)
committer  GitHub <redacted>
           Thu, 5 Jun 2025 09:57:42 +0000 (11:57 +0200)
CMakeLists.txt
ggml/CMakeLists.txt
ggml/src/CMakeLists.txt
src/llama-mmap.cpp
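
For context, PrefetchVirtualMemory is only declared by the Windows headers when the build targets Windows 8 or newer (_WIN32_WINNT >= 0x602), and MinGW toolchains may default to an older target; MSVC builds with a recent Windows SDK already default to a new enough version. That is why the GGML_WIN_VER define is now applied to the llama.cpp sources as well and is only set for MinGW. A minimal, self-contained sketch of the compile-time gate, with an arbitrary prefetch range (an illustration, not llama.cpp code):

    // Illustration only: PrefetchVirtualMemory is visible only when
    // _WIN32_WINNT targets Windows 8 (0x0602) or newer.
    #include <windows.h>
    #include <cstdio>

    int main() {
    #if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0602
        // ask the kernel to page in the first 4 KiB of this module (arbitrary range)
        WIN32_MEMORY_RANGE_ENTRY range;
        range.VirtualAddress = (PVOID) GetModuleHandleW(nullptr);
        range.NumberOfBytes  = 4096;
        if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
            std::fprintf(stderr, "PrefetchVirtualMemory failed: %lu\n", GetLastError());
        }
    #else
        // older target: the declaration is absent, so the call cannot be compiled in
        std::fprintf(stderr, "built with _WIN32_WINNT < 0x0602, prefetch not available\n");
    #endif
        return 0;
    }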

diff --git a/CMakeLists.txt b/CMakeLists.txt
index ac3e9090336d9b1248bcf7583a4ca8d75800f404..f73470dffd106bdcbb19daa9250608b4d02db047 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -159,6 +159,11 @@ if (NOT TARGET ggml AND NOT LLAMA_USE_SYSTEM_GGML)
     # ... otherwise assume ggml is added by a parent CMakeLists.txt
 endif()
 
+if (MINGW)
+    # Target Windows 8 for PrefetchVirtualMemory
+    add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
+endif()
+
 #
 # build the library
 #
diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt
index 3d01184a2ee6b41aa817ef0319521383ae6c4f86..e186fdf3c03f7816a7fbfa40826e022a9a023296 100644
--- a/ggml/CMakeLists.txt
+++ b/ggml/CMakeLists.txt
@@ -137,7 +137,7 @@ set(GGML_CPU_ARM_ARCH        "" CACHE STRING "ggml: CPU architecture for ARM")
 set(GGML_CPU_POWERPC_CPUTYPE "" CACHE STRING "ggml: CPU type for PowerPC")
 
 
-if (WIN32)
+if (MINGW)
     set(GGML_WIN_VER "0x602" CACHE STRING   "ggml: Windows version")
 endif()
 
diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt
index 76b24bd9d118f8eb1da390177fe77087134947a0..7dcb031f0f9c6c85c092653d7f8d528189ea9db5 100644
--- a/ggml/src/CMakeLists.txt
+++ b/ggml/src/CMakeLists.txt
@@ -125,7 +125,6 @@ if (NOT MSVC)
 endif()
 
 if (MINGW)
-    # Target Windows 8 for PrefetchVirtualMemory
     add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
 endif()
 
diff --git a/src/llama-mmap.cpp b/src/llama-mmap.cpp
index 9da97f1bc5057d830943307a57120e497176bb56..47497cf953fd3990d6e147e4837980bed6223b63 100644
--- a/src/llama-mmap.cpp
+++ b/src/llama-mmap.cpp
@@ -401,7 +401,7 @@ struct llama_mmap::impl {
                 }
             }
 #else
-            throw std::runtime_error("PrefetchVirtualMemory unavailable");
+            LLAMA_LOG_DEBUG("skipping PrefetchVirtualMemory because _WIN32_WINNT < 0x602\n");
 #endif
         }
     }
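
After this change the prefetch is strictly best-effort: when llama.cpp is built with _WIN32_WINNT below 0x602, the file is still memory-mapped and usable and only the read-ahead hint is skipped, whereas previously this branch threw and mmap could not be used at all on such builds. A hedged sketch of the resulting control flow (paraphrased; the helper name, parameters, and log stand-ins below are assumptions, not the exact upstream code):

    // Paraphrased sketch of the Windows prefetch path after this patch;
    // LLAMA_LOG_* are stand-ins for llama.cpp's logging macros.
    #include <windows.h>
    #include <algorithm>
    #include <cstdio>

    #define LLAMA_LOG_WARN(...)  std::fprintf(stderr, __VA_ARGS__)  // stand-in
    #define LLAMA_LOG_DEBUG(...) std::fprintf(stderr, __VA_ARGS__)  // stand-in

    static void prefetch_hint(void * addr, size_t size, size_t prefetch) {
        if (prefetch > 0) {
    #if _WIN32_WINNT >= 0x602
            // best effort: a failed hint is only logged, the mapping stays valid
            WIN32_MEMORY_RANGE_ENTRY range;
            range.VirtualAddress = addr;
            range.NumberOfBytes  = (SIZE_T) std::min(size, prefetch);
            if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
                LLAMA_LOG_WARN("PrefetchVirtualMemory failed: %lu\n", GetLastError());
            }
    #else
            // previously: throw std::runtime_error("PrefetchVirtualMemory unavailable"),
            // which prevented mmap entirely on builds targeting pre-Windows-8
            LLAMA_LOG_DEBUG("skipping PrefetchVirtualMemory because _WIN32_WINNT < 0x602\n");
    #endif
        }
    }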