From: R0CKSTAR
Date: Fri, 26 Sep 2025 00:56:10 +0000 (+0800)
Subject: musa: fix build warnings (llama/15611)
X-Git-Tag: v0.9.4~33
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=552855839ba3d4daa6126dd1dcd01cef39b6c350;p=pkg%2Fggml%2Fsources%2Fggml

musa: fix build warnings (llama/15611)

Signed-off-by: Xiaodong Ye
---

diff --git a/src/ggml-cuda/binbcast.cu b/src/ggml-cuda/binbcast.cu
index 725e1a81..60240102 100644
--- a/src/ggml-cuda/binbcast.cu
+++ b/src/ggml-cuda/binbcast.cu
@@ -54,7 +54,7 @@ static __global__ void k_bin_bcast(const src0_t * src0,
     const uint32_t i2 = fastdiv((blockDim.z * blockIdx.z + threadIdx.z), ne3);
     const uint32_t i3 = (blockDim.z * blockIdx.z + threadIdx.z) - (i2 * ne3.z);

-    if (i0s >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3.z) {
+    if (i0s >= (uint32_t)ne0 || i1 >= (uint32_t)ne1 || i2 >= (uint32_t)ne2 || i3 >= ne3.z) {
         return;
     }

diff --git a/src/ggml-cuda/mmq.cu b/src/ggml-cuda/mmq.cu
index 714b23f9..12bdc629 100644
--- a/src/ggml-cuda/mmq.cu
+++ b/src/ggml-cuda/mmq.cu
@@ -81,7 +81,7 @@ static __global__ void mmq_ids_helper(
 #pragma unroll
     for (int offset = neu_padded; offset < warp_size; offset += neu_padded) {
         const int tmp = __shfl_up_sync(0xFFFFFFFF, it_compact_add_self, offset, warp_size);
-        if (threadIdx.x >= offset) {
+        if (threadIdx.x >= static_cast<uint32_t>(offset)) {
             it_compact_add_lower += tmp;
         }
     }
@@ -110,7 +110,7 @@ static __global__ void mmq_ids_helper(

     expert_bounds[expert] = nex_prev;

-    if (expert < gridDim.x - 1) {
+    if (expert < static_cast<int>(gridDim.x) - 1) {
         return;
     }

diff --git a/src/ggml-cuda/mmvq.cu b/src/ggml-cuda/mmvq.cu
index 52de4e78..3bf0c9ed 100644
--- a/src/ggml-cuda/mmvq.cu
+++ b/src/ggml-cuda/mmvq.cu
@@ -220,7 +220,7 @@ static __global__ void mul_mat_vec_q(
             tmp[j][i] = warp_reduce_sum(tmp[j][i]);
         }

-        if (threadIdx.x < rows_per_cuda_block && (rows_per_cuda_block == 1 || row0 + int(threadIdx.x) < stride_col_dst)) {
+        if (threadIdx.x < rows_per_cuda_block && (rows_per_cuda_block == 1 || uint32_t(row0 + threadIdx.x) < stride_col_dst)) {
             dst[j*stride_col_dst + threadIdx.x] = tmp[j][threadIdx.x];
         }
     }
diff --git a/src/ggml-cuda/pad_reflect_1d.cu b/src/ggml-cuda/pad_reflect_1d.cu
index 0478889d..32993eb5 100644
--- a/src/ggml-cuda/pad_reflect_1d.cu
+++ b/src/ggml-cuda/pad_reflect_1d.cu
@@ -51,6 +51,8 @@ static __global__ __launch_bounds__(CUDA_PAD_REFLECT_1D_BLOCK_SIZE, 1) void
     }
     const float value = *(const float *) (src0_ptr + src_idx * nb00);
     *(float *) (dst_ptr + i0 * nb0) = value;
+
+    GGML_UNUSED(p1);
 }

 void ggml_cuda_op_pad_reflect_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
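
Note (illustration, not part of the patch): the changes above silence signed/unsigned comparison warnings that appear when signed values are compared against CUDA's unsigned built-ins such as threadIdx.x and gridDim.x. A minimal sketch of the pattern and the cast-based fix, using a hypothetical kernel name:

    // example_kernel is hypothetical; it only illustrates the warning class fixed by this commit.
    __global__ void example_kernel(const float * src, float * dst, int n) {
        const int offset = 1;  // signed value, analogous to the shuffle offset in mmq_ids_helper
        // threadIdx.x is unsigned, so `threadIdx.x >= offset` mixes signedness and triggers a
        // sign-compare warning; casting the signed side to an unsigned type removes the warning
        // and keeps the same behavior for non-negative values:
        if (threadIdx.x >= static_cast<unsigned int>(offset) && threadIdx.x < static_cast<unsigned int>(n)) {
            dst[threadIdx.x] = src[threadIdx.x];
        }
    }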