From: Diego Devesa
Date: Thu, 5 Jun 2025 09:57:42 +0000 (-0700)
Subject: llama : allow using mmap without PrefetchVirtualMemory, apply GGML_WIN_VER to llama...
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=921101041b227c2906cd8051fc01d8c6c364bd60;p=pkg%2Fggml%2Fsources%2Fggml

llama : allow using mmap without PrefetchVirtualMemory, apply GGML_WIN_VER to llama.cpp sources (llama/14013)
---

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3d01184a..e186fdf3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -137,7 +137,7 @@ set(GGML_CPU_ARM_ARCH "" CACHE STRING "ggml: CPU architecture for ARM")
 set(GGML_CPU_POWERPC_CPUTYPE "" CACHE STRING "ggml: CPU type for PowerPC")
 
-if (WIN32)
+if (MINGW)
     set(GGML_WIN_VER "0x602" CACHE STRING "ggml: Windows version")
 endif()
 
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index abaca7c0..d91dbc46 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -125,7 +125,6 @@ if (NOT MSVC)
 endif()
 
 if (MINGW)
-    # Target Windows 8 for PrefetchVirtualMemory
     add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
 endif()
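
The subject line (llama/14013) describes treating PrefetchVirtualMemory as optional: a model file can still be memory-mapped on Windows targets where the API is missing, and the prefetch step is simply skipped. The C++ sketch below shows one way to achieve that by resolving the symbol at run time; it is a minimal illustration, not the actual llama-mmap.cpp code, and the helper name prefetch_mapping and the locally declared memory_range_entry struct are assumptions made for the example.

    // Sketch: prefetch an already-created mapping only if PrefetchVirtualMemory exists.
    #include <windows.h>
    #include <cstdio>

    // WIN32_MEMORY_RANGE_ENTRY is only declared by the SDK headers when targeting
    // _WIN32_WINNT >= 0x0602, so declare an equivalent layout locally (assumption:
    // this mirrors the documented struct).
    struct memory_range_entry {
        PVOID  VirtualAddress;
        SIZE_T NumberOfBytes;
    };

    static void prefetch_mapping(void * addr, size_t len) {
        // PrefetchVirtualMemory exists only on Windows 8 (0x0602) and later, so look
        // it up at run time instead of linking against it.
        typedef BOOL (WINAPI * prefetch_fn_t)(HANDLE, ULONG_PTR, memory_range_entry *, ULONG);

        HMODULE kernel32 = GetModuleHandleW(L"kernel32.dll");
        prefetch_fn_t prefetch = kernel32
            ? reinterpret_cast<prefetch_fn_t>(GetProcAddress(kernel32, "PrefetchVirtualMemory"))
            : nullptr;

        if (!prefetch) {
            // No prefetch support: the mapping still works, pages just fault in lazily.
            fprintf(stderr, "PrefetchVirtualMemory unavailable, skipping prefetch\n");
            return;
        }

        memory_range_entry range;
        range.VirtualAddress = addr;
        range.NumberOfBytes  = len;

        if (!prefetch(GetCurrentProcess(), 1, &range, 0)) {
            fprintf(stderr, "PrefetchVirtualMemory failed (non-fatal)\n");
        }
    }

Because the lookup happens at run time, a build no longer has to force _WIN32_WINNT to 0x602 just to reach this API, which is consistent with the CMake change above narrowing the GGML_WIN_VER gate from WIN32 to MINGW.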