From: Howard Su
Date: Sat, 17 Jun 2023 15:46:15 +0000 (+0800)
Subject: ggml : fix warnings under MSVC (#1908)
X-Git-Tag: gguf-v0.4.0~609
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=3d59ec5935ea1d33e9d51060a8dd737169b9b89b;p=pkg%2Fggml%2Fsources%2Fllama.cpp

ggml : fix warnings under MSVC (#1908)
---

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 7edd1a9f..fed2a7ce 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -13,6 +13,10 @@
 #include "ggml-cuda.h"
 #include "ggml.h"
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size");
 
 #define CUDA_CHECK(err) \
diff --git a/ggml-opencl.cpp b/ggml-opencl.cpp
index 1d4db96e..95f4cec6 100644
--- a/ggml-opencl.cpp
+++ b/ggml-opencl.cpp
@@ -15,6 +15,10 @@
 
 #include "ggml.h"
 
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
 #define CL_DMMV_BLOCK_SIZE 32
 
 #define MULTILINE_QUOTE(...) #__VA_ARGS__
diff --git a/llama.cpp b/llama.cpp
index 81f047ed..a50846f7 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1253,7 +1253,7 @@ static void llama_model_load_internal(
         vram_scratch = n_batch * MB;
         ggml_cuda_set_scratch_size(vram_scratch);
         if (n_gpu_layers > 0) {
-            fprintf(stderr, "%s: allocating batch_size x 1 MB = %ld MB VRAM for the scratch buffer\n",
+            fprintf(stderr, "%s: allocating batch_size x 1 MB = %zd MB VRAM for the scratch buffer\n",
                     __func__, vram_scratch / MB);
         }
     }
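
Note on the llama.cpp hunk: the scratch-size value is a size_t, and on LLP64 targets such as 64-bit MSVC a long is only 32 bits, so the old %ld specifier did not match the argument width and produced a warning. The z length modifier used by the new specifier always sizes the argument as size_t. A minimal standalone sketch (file name and values are illustrative, not from the tree):

// format_size.cpp -- illustrative sketch only; builds with any C++11 compiler,
// e.g. `cl /W4 format_size.cpp` or `g++ -Wall -Wformat format_size.cpp`.
#include <cstdio>
#include <cstddef>

int main() {
    const size_t MB = 1024 * 1024;
    size_t vram_scratch = 512 * MB; // hypothetical buffer size, stands in for the real value

    // "%ld" expects a long, which is 32-bit on 64-bit Windows (LLP64), so a
    // 64-bit size_t argument is a mismatch and MSVC warns about it.
    // The "z" length modifier ("%zu" here, "%zd" in the patch above) sizes the
    // conversion to match size_t, so the call is portable across platforms:
    std::printf("scratch buffer: %zu MB\n", vram_scratch / MB);
    return 0;
}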