From: pl752
Date: Sat, 3 Jan 2026 10:13:40 +0000 (+0500)
Subject: (Bugfix, ggml-cuda) Pool alloc count fix + small size computation type adjustment...
X-Git-Tag: upstream/0.0.7721~103
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=9dba9f5352308894bfb8786fcfe7c284168ff8f5;p=pkg%2Fggml%2Fsources%2Fllama.cpp

(Bugfix, ggml-cuda) Pool alloc count fix + small size computation type adjustment (#18559)

* CUDA: Fixed obj byte size instead of obj count being passed to pool alloc (fattn-common, dst_tmp_meta)

* CUDA: Explicitly cast some of the int alloc counts before multiplication in argsort

---------

Co-authored-by: pl752
---

diff --git a/ggml/src/ggml-cuda/argsort.cu b/ggml/src/ggml-cuda/argsort.cu
index da9652c3..99669200 100644
--- a/ggml/src/ggml-cuda/argsort.cu
+++ b/ggml/src/ggml-cuda/argsort.cu
@@ -29,8 +29,8 @@ static void argsort_f32_i32_cuda_cub(ggml_cuda_pool & pool,
                                      const int nrows,
                                      ggml_sort_order order,
                                      cudaStream_t stream) {
-    ggml_cuda_pool_alloc<int> temp_indices_alloc(pool, ncols * nrows);
-    ggml_cuda_pool_alloc<float> temp_keys_alloc(pool, ncols * nrows);
+    ggml_cuda_pool_alloc<int> temp_indices_alloc(pool, ((size_t) ncols) * nrows);
+    ggml_cuda_pool_alloc<float> temp_keys_alloc(pool, ((size_t) ncols) * nrows);
     ggml_cuda_pool_alloc<int> offsets_alloc(pool, nrows + 1);
 
     int * temp_indices = temp_indices_alloc.get();
diff --git a/ggml/src/ggml-cuda/fattn-common.cuh b/ggml/src/ggml-cuda/fattn-common.cuh
index 8dc82a9d..fa4e87ee 100644
--- a/ggml/src/ggml-cuda/fattn-common.cuh
+++ b/ggml/src/ggml-cuda/fattn-common.cuh
@@ -918,7 +918,7 @@ void launch_fattn(
         blocks_num.y = 1;
         blocks_num.z = 1;
 
-        dst_tmp_meta.alloc(blocks_num.x*ncols * (2*2 + DV) * sizeof(float));
+        dst_tmp_meta.alloc(((size_t) blocks_num.x) * ncols * (2 + DV/2));
     } else {
         const int ntiles_KQ = (K->ne[1] + nbatch_fa - 1) / nbatch_fa; // Max. number of parallel blocks limited by tensor size.