git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
cuda : check if this fixes Pascal card regression (#3882)
author: Georgi Gerganov <redacted>
Thu, 2 Nov 2023 06:35:10 +0000 (08:35 +0200)
committer: GitHub <redacted>
Thu, 2 Nov 2023 06:35:10 +0000 (08:35 +0200)
ggml-cuda.cu

index 57a528ede23ed24da855e4ca2e4cfb87e2b9aeca..e4629512611b6c8effc34d17ad3f4bf77c367a08 100644 (file)
@@ -7420,7 +7420,7 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     } else if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
         // KQV single-batch
         ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
-    } else if (all_on_device && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
+    } else if (all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
         // KQ + KQV multi-batch
         ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst);
     } else if (src0->type == GGML_TYPE_F32) {