build : move _WIN32_WINNT definition to headers (#17736)
author Adrien Gallouët <redacted>
Thu, 4 Dec 2025 06:04:02 +0000 (07:04 +0100)
committer GitHub <redacted>
Thu, 4 Dec 2025 06:04:02 +0000 (07:04 +0100)
Previously, cmake was forcing `_WIN32_WINNT=0x0A00` for MinGW builds, which
caused "macro redefined" warnings with toolchains that already define the version.

This also removes the `GGML_WIN_VER` variable as it is no longer needed.

Signed-off-by: Adrien Gallouët <redacted>
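
The guarded define that the patch adds to common/common.h and ggml/include/ggml.h only supplies a default Windows API level when nothing else has set one, so a toolchain that already defines `_WIN32_WINNT` (for example via `-D_WIN32_WINNT=0x0601` or its own headers) no longer triggers a redefinition warning. A minimal standalone sketch of this pattern (the main() and printf below are illustrative only, not part of the commit):

    /* Sketch of the guarded default: only define _WIN32_WINNT if the
     * toolchain or build system has not already done so. */
    #if defined(_WIN32) && !defined(_WIN32_WINNT)
    #define _WIN32_WINNT 0x0A00   /* default to the Windows 10 API level */
    #endif

    #include <stdio.h>

    int main(void) {
    #ifdef _WIN32_WINNT
        /* On Windows builds, report whichever value ended up in effect. */
        printf("_WIN32_WINNT = 0x%04X\n", (unsigned) _WIN32_WINNT);
    #else
        printf("_WIN32_WINNT is not defined (non-Windows build)\n");
    #endif
        return 0;
    }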
CMakeLists.txt
common/common.h
ggml/CMakeLists.txt
ggml/include/ggml.h
ggml/src/CMakeLists.txt
tools/server/CMakeLists.txt

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 11c9b878e79f1de2e450a1e838305b295bafe774..c231ec0e3fa147782782334f68d2387dbeda85c6 100644
@@ -199,11 +199,6 @@ if (NOT TARGET ggml AND NOT LLAMA_USE_SYSTEM_GGML)
     # ... otherwise assume ggml is added by a parent CMakeLists.txt
 endif()
 
-if (MINGW)
-    # Target Windows 8 for PrefetchVirtualMemory
-    add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
-endif()
-
 #
 # build the library
 #
diff --git a/common/common.h b/common/common.h
index 6e6b2c1cab69d0128e535df2ebc47ca580131bb7..179113a4dbfe165b07e18430f56b9d752c590eb5 100644
 #include <vector>
 #include <map>
 
+#if defined(_WIN32) && !defined(_WIN32_WINNT)
+#define _WIN32_WINNT 0x0A00
+#endif
+
 #ifdef _WIN32
 #define DIRECTORY_SEPARATOR '\\'
 #else
diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt
index db47ae8dff2f963be379abd7c5d3da57eefe8d96..0ccd901921db1e7f5d6e9da6e5e518dcd1321f0e 100644
@@ -175,11 +175,6 @@ option(GGML_CPU_ALL_VARIANTS "ggml: build all variants of the CPU backend (requi
 set(GGML_CPU_ARM_ARCH        "" CACHE STRING "ggml: CPU architecture for ARM")
 set(GGML_CPU_POWERPC_CPUTYPE "" CACHE STRING "ggml: CPU type for PowerPC")
 
-
-if (MINGW)
-    set(GGML_WIN_VER "0xA00" CACHE STRING   "ggml: Windows version")
-endif()
-
 # ggml core
 set(GGML_SCHED_MAX_COPIES  "4" CACHE STRING "ggml: max input copies for pipeline parallelism")
 option(GGML_CPU                             "ggml: enable CPU backend"                        ON)
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index e665614670d4802a8c1ead1a62a4aa12e7dd87a8..b0e10f57685e9e2ed5be866e2499b8499c9d4241 100644
 #    define GGML_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
 #endif
 
+#if defined(_WIN32) && !defined(_WIN32_WINNT)
+#    define _WIN32_WINNT 0x0A00
+#endif
+
 #include <stdbool.h>
 #include <stddef.h>
 #include <stdint.h>
diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt
index d93664b8b58bb2029c921e4fafc0292d92a80dc0..98606e9cf1854a15363c4a87793e6a68369a97e0 100644
@@ -127,10 +127,6 @@ if (NOT MSVC)
     endif()
 endif()
 
-if (MINGW)
-    add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
-endif()
-
 #
 # POSIX conformance
 #
diff --git a/tools/server/CMakeLists.txt b/tools/server/CMakeLists.txt
index fb71c7aa7be872c43bba5916aab259427dc3f24c..1aa659a906692b84d14c683300f2c6f804d6b579 100644
@@ -2,11 +2,6 @@ set(TARGET llama-server)
 
 include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
 
-if (MINGW)
-    # fix: https://github.com/ggml-org/llama.cpp/actions/runs/9651004652/job/26617901362?pr=8006
-    add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
-endif()
-
 if (NOT LLAMA_HTTPLIB)
     message(FATAL_ERROR "LLAMA_HTTPLIB is OFF, cannot build llama-server. Hint: to skip building server, set -DLLAMA_BUILD_SERVER=OFF")
 endif()