From: Georgi Gerganov
Date: Thu, 2 Nov 2023 06:35:10 +0000 (+0200)
Subject: cuda : check if this fixes Pascal card regression (#3882)
X-Git-Tag: upstream/0.0.4488~3020
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=4d719a6d4e74b9a98e75f826f865f3153717d54b;p=pkg%2Fggml%2Fsources%2Fllama.cpp

cuda : check if this fixes Pascal card regression (#3882)
---

diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 57a528ed..e4629512 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -7420,7 +7420,7 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     } else if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
         // KQV single-batch
         ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
-    } else if (all_on_device && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
+    } else if (all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
         // KQ + KQV multi-batch
         ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst);
     } else if (src0->type == GGML_TYPE_F32) {
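
For context: the patch gates the F16 x F32 batched cuBLAS path on `use_tensor_cores`, so devices without tensor cores (Pascal is compute capability 6.x, below the Volta generation that introduced them) fall through to the later branches instead. The sketch below is a minimal, hypothetical illustration of that dispatch ordering; `choose_path`, `has_tensor_cores`, and the `CC_VOLTA` threshold are assumptions for illustration, not the exact llama.cpp code.

    // Hypothetical sketch of the mul_mat dispatch after this patch.
    // Not the actual ggml-cuda.cu code: names and the threshold are assumed.
    #include <cstdio>

    enum class mm_path { vec_nc, batched_cublas, generic };

    // Tensor cores are assumed to require compute capability >= 7.0 (Volta);
    // Pascal reports 6.x, so this returns false there.
    static bool has_tensor_cores(int compute_cap) {
        const int CC_VOLTA = 700; // assumed threshold constant
        return compute_cap >= CC_VOLTA;
    }

    // Simplified branch ordering corresponding to the hunk above.
    static mm_path choose_path(bool all_on_device, bool f16_src0, bool f32_src1,
                               bool contiguous_src0, bool transposed, int n_batch,
                               int compute_cap) {
        const bool use_tensor_cores = has_tensor_cores(compute_cap);

        if (all_on_device && !use_tensor_cores && f16_src0 && !contiguous_src0 &&
            !transposed && n_batch == 1) {
            return mm_path::vec_nc;            // KQV single-batch
        }
        if (all_on_device && use_tensor_cores && f16_src0 && f32_src1 && !transposed) {
            return mm_path::batched_cublas;    // KQ + KQV multi-batch, now tensor-core only
        }
        return mm_path::generic;               // remaining F32 / quantized paths
    }

    int main() {
        // On a Pascal card (e.g. CC 6.1) the batched cuBLAS branch is no longer taken.
        const mm_path p = choose_path(true, true, true, true, false, 32, 610);
        std::printf("pascal multi-batch path: %s\n",
                    p == mm_path::batched_cublas ? "batched_cublas" : "generic");
        return 0;
    }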