From: Johannes Gäßler
Date: Tue, 6 Aug 2024 15:13:55 +0000 (+0200)
Subject: CUDA: fix padding logic for FP16/FP32 (llama/8884)
X-Git-Tag: upstream/0.0.1642~462
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=9510e3cd55b5b1814cf529aa620de008ee788239;p=pkg%2Fggml%2Fsources%2Fggml

CUDA: fix padding logic for FP16/FP32 (llama/8884)
---

diff --git a/src/ggml-cuda.cu b/src/ggml-cuda.cu
index 68605fff..654f93e8 100644
--- a/src/ggml-cuda.cu
+++ b/src/ggml-cuda.cu
@@ -1501,7 +1501,7 @@ static void ggml_cuda_op_mul_mat(
     }
 
     // If src0 is on a temporary compute buffers (partial offloading) there may be some padding that needs to be cleared:
-    if (ne00 % MATRIX_ROW_PADDING != 0 && ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE && src0->view_src == nullptr) {
+    if (ne00 % MATRIX_ROW_PADDING != 0 && ggml_is_quantized(src0->type) && ggml_backend_buffer_get_usage(src0->buffer) == GGML_BACKEND_BUFFER_USAGE_COMPUTE && src0->view_src == nullptr) {
         const int64_t nbytes_data    = ggml_row_size(src0->type, (dev[id].row_high - dev[id].row_low)*ne00);
         const int64_t nbytes_padding = ggml_row_size(src0->type, MATRIX_ROW_PADDING - ne00 % MATRIX_ROW_PADDING);
         CUDA_CHECK(cudaMemsetAsync(dev[id].src0_dd + nbytes_data , 0, nbytes_padding, stream));
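
For context: the change adds ggml_is_quantized(src0->type) to the guard, so the
padding memset now runs only for quantized src0 tensors; for FP16/FP32 the memset
is skipped, presumably because only the quantized matmul path reads whole padded
rows, making the clear unnecessary (and potentially out of bounds) for those types.
Below is a minimal, self-contained sketch of the fixed guard, not ggml's actual
code: MATRIX_ROW_PADDING = 512 is assumed to match ggml-cuda, and Type,
is_quantized(), and row_size() are simplified stand-ins for ggml_type,
ggml_is_quantized(), and ggml_row_size().

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    enum class Type { F32, F16, Q8_0 };          // stand-in for ggml_type
    constexpr int64_t MATRIX_ROW_PADDING = 512;  // assumed value, as in ggml-cuda

    // stand-in for ggml_is_quantized()
    static bool is_quantized(Type t) {
        return t == Type::Q8_0;
    }

    // stand-in for ggml_row_size(): bytes needed for n elements of type t;
    // Q8_0 packs 32 elements into a 34-byte block (32 int8 quants + fp16 scale)
    static int64_t row_size(Type t, int64_t n) {
        switch (t) {
            case Type::F32:  return 4*n;
            case Type::F16:  return 2*n;
            case Type::Q8_0: return 34*(n/32);
        }
        return 0;
    }

    int main() {
        const int64_t ne00 = 4128;  // example row length, not a multiple of 512
        for (Type t : {Type::F32, Type::F16, Type::Q8_0}) {
            // the fixed guard: only quantized rows have padding to clear
            const bool clear_padding = ne00 % MATRIX_ROW_PADDING != 0 && is_quantized(t);
            const int64_t nbytes_padding =
                row_size(t, MATRIX_ROW_PADDING - ne00 % MATRIX_ROW_PADDING);
            printf("type=%d clear_padding=%d nbytes_padding=%lld\n",
                   (int)t, (int)clear_padding, (long long)nbytes_padding);
        }
        return 0;
    }

Running the sketch prints clear_padding=1 only for the quantized type, mirroring
how the patched condition limits the cudaMemsetAsync of the row-padding region to
quantized src0 buffers.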