// second part effectively subtracts 8 from each quant value
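// (sketch of the algebra, assuming block_q8_1 stores {d8, sum(y_i)} in ds: with
//  x_i = d4*(q_i - 8), sum(x_i*y_i) = d4*(d8*sum(q_i*p_i) - 8*sum(y_i)); each of
//  the QI4_0/vdr calls covering a block adds a vdr/QI4_0 share of the correction)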
return d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y);
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it
return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1));
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// second part effectively subtracts 16 from each quant value
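// (same algebra as the q4_0 case above, with an offset of 16 and QI5_0 in place of 8 and QI4_0)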
return d5 * (sumi * ds8f.x - (16*vdr/QI5_0) * ds8f.y);
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
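// scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it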
return sumi*d5d8 + m5s8 / (QI5_1 / vdr);
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
return d8_0*d8_1 * sumi;
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
// scale second part of sum by QI8_1 / vdr to compensate for multiple threads adding it
return sumi*d8d8 + m8s8 / (QI8_1 / vdr);
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
return dm2f.x*sumf_d - dm2f.y*sumf_m;
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
return d8 * (dm2f.x*sumi_d - dm2f.y*sumi_m);
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
return d3 * sumf;
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
return d3*d8 * sumi;
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
return dm4f.x*sumf_d - dm4f.y*sumf_m;
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
return dm4f.x*sumf_d - dm4f.y*sumf_m;
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
return dm5f.x*sumf_d - dm5f.y*sumf_m;
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
return d*sumf;
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
return d6 * sumf_d;
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
}
return dall * sumf_d - dmin * sumf_m;
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
return d * sumf_d;
#else
+ assert(false);
return 0.0f; // only to satisfy the compiler
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
template <int qk, int qr, int qi, bool need_sum, typename block_q_t, int mmq_x, int mmq_y, int nwarps,
allocate_tiles_cuda_t allocate_tiles, load_tiles_cuda_t load_tiles, int vdr, vec_dot_q_mul_mat_cuda_t vec_dot>
-static __global__ void mul_mat_q(
+static __device__ __forceinline__ void mul_mat_q(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
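+ // mul_mat_q is now a plain __device__ helper; the per-type __global__ wrappers
+ // below instantiate it with tile sizes fixed at compile time for each arch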
const int row_dst_0 = blockIdx.x*mmq_y;
const int & row_x_0 = row_dst_0;
- const int row_dst = row_dst_0 + threadIdx.x;
const int col_dst_0 = blockIdx.y*mmq_x;
const int & col_y_0 = col_dst_0;
}
}
-
- if (row_dst >= nrows_dst) {
- return;
- }
-
+#pragma unroll
for (int j = 0; j < mmq_x; j += nwarps) {
const int col_dst = col_dst_0 + j + threadIdx.y;
return;
}
+#pragma unroll
for (int i = 0; i < mmq_y; i += WARP_SIZE) {
- dst[col_dst*nrows_dst + row_dst + i] = sum[i/WARP_SIZE][j/nwarps];
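+ // recompute the row index for each tile row and skip out-of-bounds rows
+ // individually, instead of early-returning the whole thread as before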
+ const int row_dst = row_dst_0 + threadIdx.x + i;
+
+ if (row_dst >= nrows_dst) {
+ continue;
+ }
+
+ dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps];
}
}
}
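+
+ // Per-type tile sizes: mmq_x = columns of the y tile per block, mmq_y = rows of
+ // the x tile per block, nwarps = warps per block. The *_AMPERE values apply to
+ // any arch >= CC_TURING, the *_PASCAL values to any arch >= MIN_CC_DP4A.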
+#define MMQ_X_Q4_0_AMPERE 64
+#define MMQ_Y_Q4_0_AMPERE 128
+#define NWARPS_Q4_0_AMPERE 4
+#define MMQ_X_Q4_0_PASCAL 64
+#define MMQ_Y_Q4_0_PASCAL 64
+#define NWARPS_Q4_0_PASCAL 8
+
+template <bool need_check> static __global__ void mul_mat_q4_0(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
+
+#if __CUDA_ARCH__ >= CC_TURING
+ const int mmq_x = MMQ_X_Q4_0_AMPERE;
+ const int mmq_y = MMQ_Y_Q4_0_AMPERE;
+ const int nwarps = NWARPS_Q4_0_AMPERE;
+
+ mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
+ load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= MIN_CC_DP4A
+ const int mmq_x = MMQ_X_Q4_0_PASCAL;
+ const int mmq_y = MMQ_Y_Q4_0_PASCAL;
+ const int nwarps = NWARPS_Q4_0_PASCAL;
+
+ mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
+ load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+#else
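+ // on archs without DP4A the kernel body is compiled out; referencing the
+ // otherwise-unused function silences compiler warnings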
+ (void) vec_dot_q4_0_q8_1_mul_mat;
+ assert(false);
+#endif // __CUDA_ARCH__ >= CC_TURING
+}
+
+#define MMQ_X_Q4_1_AMPERE 64
+#define MMQ_Y_Q4_1_AMPERE 128
+#define NWARPS_Q4_1_AMPERE 4
+#define MMQ_X_Q4_1_PASCAL 64
+#define MMQ_Y_Q4_1_PASCAL 64
+#define NWARPS_Q4_1_PASCAL 8
+
+template <bool need_check> static __global__ void mul_mat_q4_1(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
+
+#if __CUDA_ARCH__ >= CC_TURING
+ const int mmq_x = MMQ_X_Q4_1_AMPERE;
+ const int mmq_y = MMQ_Y_Q4_1_AMPERE;
+ const int nwarps = NWARPS_Q4_1_AMPERE;
+
+ mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
+ load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= MIN_CC_DP4A
+ const int mmq_x = MMQ_X_Q4_1_PASCAL;
+ const int mmq_y = MMQ_Y_Q4_1_PASCAL;
+ const int nwarps = NWARPS_Q4_1_PASCAL;
+
+ mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
+ load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+#else
+ (void) vec_dot_q4_1_q8_1_mul_mat;
+ assert(false);
+#endif // __CUDA_ARCH__ >= CC_TURING
+}
+
+#define MMQ_X_Q5_0_AMPERE 128
+#define MMQ_Y_Q5_0_AMPERE 64
+#define NWARPS_Q5_0_AMPERE 4
+#define MMQ_X_Q5_0_PASCAL 64
+#define MMQ_Y_Q5_0_PASCAL 64
+#define NWARPS_Q5_0_PASCAL 8
+
+template <bool need_check> static __global__ void mul_mat_q5_0(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
+
+#if __CUDA_ARCH__ >= CC_TURING
+ const int mmq_x = MMQ_X_Q5_0_AMPERE;
+ const int mmq_y = MMQ_Y_Q5_0_AMPERE;
+ const int nwarps = NWARPS_Q5_0_AMPERE;
+
+ mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
+ load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= MIN_CC_DP4A
+ const int mmq_x = MMQ_X_Q5_0_PASCAL;
+ const int mmq_y = MMQ_Y_Q5_0_PASCAL;
+ const int nwarps = NWARPS_Q5_0_PASCAL;
+
+ mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
+ load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+#else
+ (void) vec_dot_q5_0_q8_1_mul_mat;
+ assert(false);
+#endif // __CUDA_ARCH__ >= CC_TURING
+}
+
+#define MMQ_X_Q5_1_AMPERE 128
+#define MMQ_Y_Q5_1_AMPERE 64
+#define NWARPS_Q5_1_AMPERE 4
+#define MMQ_X_Q5_1_PASCAL 64
+#define MMQ_Y_Q5_1_PASCAL 64
+#define NWARPS_Q5_1_PASCAL 8
+
+template <bool need_check> static __global__ void mul_mat_q5_1(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
+
+#if __CUDA_ARCH__ >= CC_TURING
+ const int mmq_x = MMQ_X_Q5_1_AMPERE;
+ const int mmq_y = MMQ_Y_Q5_1_AMPERE;
+ const int nwarps = NWARPS_Q5_1_AMPERE;
+
+ mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
+ load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= MIN_CC_DP4A
+ const int mmq_x = MMQ_X_Q5_1_PASCAL;
+ const int mmq_y = MMQ_Y_Q5_1_PASCAL;
+ const int nwarps = NWARPS_Q5_1_PASCAL;
+
+ mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
+ load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+#else
+ (void) vec_dot_q5_1_q8_1_mul_mat;
+ assert(false);
+#endif // __CUDA_ARCH__ >= CC_TURING
+}
+
+#define MMQ_X_Q8_0_AMPERE 128
+#define MMQ_Y_Q8_0_AMPERE 64
+#define NWARPS_Q8_0_AMPERE 4
+#define MMQ_X_Q8_0_PASCAL 64
+#define MMQ_Y_Q8_0_PASCAL 64
+#define NWARPS_Q8_0_PASCAL 8
+
+template <bool need_check> static __global__ void mul_mat_q8_0(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
+
+#if __CUDA_ARCH__ >= CC_TURING
+ const int mmq_x = MMQ_X_Q8_0_AMPERE;
+ const int mmq_y = MMQ_Y_Q8_0_AMPERE;
+ const int nwarps = NWARPS_Q8_0_AMPERE;
+
+ mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
+ load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= MIN_CC_DP4A
+ const int mmq_x = MMQ_X_Q8_0_PASCAL;
+ const int mmq_y = MMQ_Y_Q8_0_PASCAL;
+ const int nwarps = NWARPS_Q8_0_PASCAL;
+
+ mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
+ load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+#else
+ (void) vec_dot_q8_0_q8_1_mul_mat;
+ assert(false);
+#endif // __CUDA_ARCH__ >= CC_TURING
+}
+
+#define MMQ_X_Q2_K_AMPERE 64
+#define MMQ_Y_Q2_K_AMPERE 128
+#define NWARPS_Q2_K_AMPERE 4
+#define MMQ_X_Q2_K_PASCAL 64
+#define MMQ_Y_Q2_K_PASCAL 64
+#define NWARPS_Q2_K_PASCAL 8
+
+template <bool need_check> static __global__ void mul_mat_q2_K(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
+
+#if __CUDA_ARCH__ >= CC_TURING
+ const int mmq_x = MMQ_X_Q2_K_AMPERE;
+ const int mmq_y = MMQ_Y_Q2_K_AMPERE;
+ const int nwarps = NWARPS_Q2_K_AMPERE;
+
+ mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
+ load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= MIN_CC_DP4A
+ const int mmq_x = MMQ_X_Q2_K_PASCAL;
+ const int mmq_y = MMQ_Y_Q2_K_PASCAL;
+ const int nwarps = NWARPS_Q2_K_PASCAL;
+
+ mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
+ load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+#else
+ (void) vec_dot_q2_K_q8_1_mul_mat;
+ assert(false);
+#endif // __CUDA_ARCH__ >= CC_TURING
+}
+
+#define MMQ_X_Q3_K_AMPERE 128
+#define MMQ_Y_Q3_K_AMPERE 128
+#define NWARPS_Q3_K_AMPERE 4
+#define MMQ_X_Q3_K_PASCAL 64
+#define MMQ_Y_Q3_K_PASCAL 64
+#define NWARPS_Q3_K_PASCAL 8
+
+template <bool need_check> static __global__ void mul_mat_q3_K(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
+
+#if __CUDA_ARCH__ >= CC_TURING
+ const int mmq_x = MMQ_X_Q3_K_AMPERE;
+ const int mmq_y = MMQ_Y_Q3_K_AMPERE;
+ const int nwarps = NWARPS_Q3_K_AMPERE;
+
+ mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
+ load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= MIN_CC_DP4A
+ const int mmq_x = MMQ_X_Q3_K_PASCAL;
+ const int mmq_y = MMQ_Y_Q3_K_PASCAL;
+ const int nwarps = NWARPS_Q3_K_PASCAL;
+
+ mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
+ load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+#else
+ (void) vec_dot_q3_K_q8_1_mul_mat;
+ assert(false);
+#endif // __CUDA_ARCH__ >= CC_TURING
+}
+
+#define MMQ_X_Q4_K_AMPERE 64
+#define MMQ_Y_Q4_K_AMPERE 128
+#define NWARPS_Q4_K_AMPERE 4
+#define MMQ_X_Q4_K_PASCAL 32
+#define MMQ_Y_Q4_K_PASCAL 64
+#define NWARPS_Q4_K_PASCAL 8
+
+template <bool need_check> static __global__ void mul_mat_q4_K(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
+
+#if __CUDA_ARCH__ >= CC_TURING
+ const int mmq_x = MMQ_X_Q4_K_AMPERE;
+ const int mmq_y = MMQ_Y_Q4_K_AMPERE;
+ const int nwarps = NWARPS_Q4_K_AMPERE;
+
+ mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
+ load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= MIN_CC_DP4A
+ const int mmq_x = MMQ_X_Q4_K_PASCAL;
+ const int mmq_y = MMQ_Y_Q4_K_PASCAL;
+ const int nwarps = NWARPS_Q4_K_PASCAL;
+
+ mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
+ load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+#else
+ (void) vec_dot_q4_K_q8_1_mul_mat;
+ assert(false);
+#endif // __CUDA_ARCH__ >= CC_TURING
+}
+
+#define MMQ_X_Q5_K_AMPERE 64
+#define MMQ_Y_Q5_K_AMPERE 128
+#define NWARPS_Q5_K_AMPERE 4
+#define MMQ_X_Q5_K_PASCAL 64
+#define MMQ_Y_Q5_K_PASCAL 64
+#define NWARPS_Q5_K_PASCAL 8
+
+template <bool need_check> static __global__ void mul_mat_q5_K(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
+
+#if __CUDA_ARCH__ >= CC_TURING
+ const int mmq_x = MMQ_X_Q5_K_AMPERE;
+ const int mmq_y = MMQ_Y_Q5_K_AMPERE;
+ const int nwarps = NWARPS_Q5_K_AMPERE;
+
+ mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
+ load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= MIN_CC_DP4A
+ const int mmq_x = MMQ_X_Q5_K_PASCAL;
+ const int mmq_y = MMQ_Y_Q5_K_PASCAL;
+ const int nwarps = NWARPS_Q5_K_PASCAL;
+
+ mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
+ load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+#else
+ (void) vec_dot_q5_K_q8_1_mul_mat;
+ assert(false);
+#endif // __CUDA_ARCH__ >= CC_TURING
+}
+
+#define MMQ_X_Q6_K_AMPERE 64
+#define MMQ_Y_Q6_K_AMPERE 64
+#define NWARPS_Q6_K_AMPERE 4
+#define MMQ_X_Q6_K_PASCAL 32
+#define MMQ_Y_Q6_K_PASCAL 64
+#define NWARPS_Q6_K_PASCAL 8
+
+template <bool need_check> static __global__ void mul_mat_q6_K(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
+
+#if __CUDA_ARCH__ >= CC_TURING
+ const int mmq_x = MMQ_X_Q6_K_AMPERE;
+ const int mmq_y = MMQ_Y_Q6_K_AMPERE;
+ const int nwarps = NWARPS_Q6_K_AMPERE;
+
+ mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
+ load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= MIN_CC_DP4A
+ const int mmq_x = MMQ_X_Q6_K_PASCAL;
+ const int mmq_y = MMQ_Y_Q6_K_PASCAL;
+ const int nwarps = NWARPS_Q6_K_PASCAL;
+
+ mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
+ load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+#else
+ (void) vec_dot_q6_K_q8_1_mul_mat;
+ assert(false);
+#endif // __CUDA_ARCH__ >= CC_TURING
+}
+
template <int qk, int qi, typename block_q_t, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda>
static __global__ void mul_mat_vec_q(const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols, const int nrows) {
const int row = blockIdx.y*blockDim.y + threadIdx.y;
CUDA_CHECK(cudaGetDevice(&id));
const int compute_capability = g_compute_capabilities[id];
+ int mmq_x, mmq_y, nwarps;
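+ // these values only size the launch grid; they must match the compile-time
+ // tile sizes selected inside the mul_mat_q4_0 kernel for the same arch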
if (compute_capability >= CC_TURING) {
- const int mmq_x = 64;
- const int mmq_y = 128;
- const int nwarps = 4;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
- load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
- load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ mmq_x = MMQ_X_Q4_0_AMPERE;
+ mmq_y = MMQ_Y_Q4_0_AMPERE;
+ nwarps = NWARPS_Q4_0_AMPERE;
+ } else if (compute_capability >= MIN_CC_DP4A) {
+ mmq_x = MMQ_X_Q4_0_PASCAL;
+ mmq_y = MMQ_Y_Q4_0_PASCAL;
+ nwarps = NWARPS_Q4_0_PASCAL;
} else {
- const int mmq_x = 64;
- const int mmq_y = 64;
- const int nwarps = 4;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
- load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
- load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ GGML_ASSERT(false);
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const dim3 block_nums(block_num_x, block_num_y, 1);
+ const dim3 block_dims(WARP_SIZE, nwarps, 1);
+
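+ // when nrows_x is a multiple of mmq_y every tile is full and the kernel can use
+ // the load path without bounds checks; the same dispatch repeats for each
+ // quantization type below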
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ mul_mat_q4_0<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+ } else {
+ const bool need_check = true;
+ mul_mat_q4_0<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
}
CUDA_CHECK(cudaGetDevice(&id));
const int compute_capability = g_compute_capabilities[id];
+ int mmq_x, mmq_y, nwarps;
if (compute_capability >= CC_TURING) {
- const int mmq_x = 64;
- const int mmq_y = 128;
- const int nwarps = 4;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
- load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
- load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ mmq_x = MMQ_X_Q4_1_AMPERE;
+ mmq_y = MMQ_Y_Q4_1_AMPERE;
+ nwarps = NWARPS_Q4_1_AMPERE;
+ } else if (compute_capability >= MIN_CC_DP4A) {
+ mmq_x = MMQ_X_Q4_1_PASCAL;
+ mmq_y = MMQ_Y_Q4_1_PASCAL;
+ nwarps = NWARPS_Q4_1_PASCAL;
} else {
- const int mmq_x = 64;
- const int mmq_y = 64;
- const int nwarps = 8;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
- load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
- load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ GGML_ASSERT(false);
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const dim3 block_nums(block_num_x, block_num_y, 1);
+ const dim3 block_dims(WARP_SIZE, nwarps, 1);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ mul_mat_q4_1<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+ } else {
+ const bool need_check = true;
+ mul_mat_q4_1<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
}
CUDA_CHECK(cudaGetDevice(&id));
const int compute_capability = g_compute_capabilities[id];
+ int mmq_x, mmq_y, nwarps;
if (compute_capability >= CC_TURING) {
- const int mmq_x = 128;
- const int mmq_y = 64;
- const int nwarps = 4;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
- load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
- load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ mmq_x = MMQ_X_Q5_0_AMPERE;
+ mmq_y = MMQ_Y_Q5_0_AMPERE;
+ nwarps = NWARPS_Q5_0_AMPERE;
+ } else if (compute_capability >= MIN_CC_DP4A) {
+ mmq_x = MMQ_X_Q5_0_PASCAL;
+ mmq_y = MMQ_Y_Q5_0_PASCAL;
+ nwarps = NWARPS_Q5_0_PASCAL;
} else {
- const int mmq_x = 64;
- const int mmq_y = 64;
- const int nwarps = 8;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
- load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
- load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ GGML_ASSERT(false);
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const dim3 block_nums(block_num_x, block_num_y, 1);
+ const dim3 block_dims(WARP_SIZE, nwarps, 1);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ mul_mat_q5_0<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+ } else {
+ const bool need_check = true;
+ mul_mat_q5_0<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
}
CUDA_CHECK(cudaGetDevice(&id));
const int compute_capability = g_compute_capabilities[id];
+ int mmq_x, mmq_y, nwarps;
if (compute_capability >= CC_TURING) {
- const int mmq_x = 128;
- const int mmq_y = 64;
- const int nwarps = 8;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
- load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
- load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ mmq_x = MMQ_X_Q5_1_AMPERE;
+ mmq_y = MMQ_Y_Q5_1_AMPERE;
+ nwarps = NWARPS_Q5_1_AMPERE;
+ } else if (compute_capability >= MIN_CC_DP4A) {
+ mmq_x = MMQ_X_Q5_1_PASCAL;
+ mmq_y = MMQ_Y_Q5_1_PASCAL;
+ nwarps = NWARPS_Q5_1_PASCAL;
} else {
- const int mmq_x = 64;
- const int mmq_y = 64;
- const int nwarps = 8;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
- load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
- load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ GGML_ASSERT(false);
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const dim3 block_nums(block_num_x, block_num_y, 1);
+ const dim3 block_dims(WARP_SIZE, nwarps, 1);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ mul_mat_q5_1<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+ } else {
+ const bool need_check = true;
+ mul_mat_q5_1<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
}
CUDA_CHECK(cudaGetDevice(&id));
const int compute_capability = g_compute_capabilities[id];
+ int mmq_x, mmq_y, nwarps;
if (compute_capability >= CC_TURING) {
- const int mmq_x = 128;
- const int mmq_y = 64;
- const int nwarps = 4;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
- load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
- load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ mmq_x = MMQ_X_Q8_0_AMPERE;
+ mmq_y = MMQ_Y_Q8_0_AMPERE;
+ nwarps = NWARPS_Q8_0_AMPERE;
+ } else if (compute_capability >= MIN_CC_DP4A) {
+ mmq_x = MMQ_X_Q8_0_PASCAL;
+ mmq_y = MMQ_Y_Q8_0_PASCAL;
+ nwarps = NWARPS_Q8_0_PASCAL;
} else {
- const int mmq_x = 64;
- const int mmq_y = 64;
- const int nwarps = 8;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
- load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
- load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ GGML_ASSERT(false);
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const dim3 block_nums(block_num_x, block_num_y, 1);
+ const dim3 block_dims(WARP_SIZE, nwarps, 1);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ mul_mat_q8_0<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+ } else {
+ const bool need_check = true;
+ mul_mat_q8_0<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
}
CUDA_CHECK(cudaGetDevice(&id));
const int compute_capability = g_compute_capabilities[id];
+ int mmq_x, mmq_y, nwarps;
if (compute_capability >= CC_TURING) {
- const int mmq_x = 64;
- const int mmq_y = 128;
- const int nwarps = 4;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
- load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
- load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ mmq_x = MMQ_X_Q2_K_AMPERE;
+ mmq_y = MMQ_Y_Q2_K_AMPERE;
+ nwarps = NWARPS_Q2_K_AMPERE;
+ } else if (compute_capability >= MIN_CC_DP4A) {
+ mmq_x = MMQ_X_Q2_K_PASCAL;
+ mmq_y = MMQ_Y_Q2_K_PASCAL;
+ nwarps = NWARPS_Q2_K_PASCAL;
} else {
- const int mmq_x = 64;
- const int mmq_y = 64;
- const int nwarps = 8;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
- load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
- load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ GGML_ASSERT(false);
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const dim3 block_nums(block_num_x, block_num_y, 1);
+ const dim3 block_dims(WARP_SIZE, nwarps, 1);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ mul_mat_q2_K<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+ } else {
+ const bool need_check = true;
+ mul_mat_q2_K<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
}
CUDA_CHECK(cudaGetDevice(&id));
const int compute_capability = g_compute_capabilities[id];
+ int mmq_x, mmq_y, nwarps;
if (compute_capability >= CC_TURING) {
- const int mmq_x = 128;
- const int mmq_y = 128;
- const int nwarps = 4;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
- load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
- load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ mmq_x = MMQ_X_Q3_K_AMPERE;
+ mmq_y = MMQ_Y_Q3_K_AMPERE;
+ nwarps = NWARPS_Q3_K_AMPERE;
+ } else if (compute_capability >= MIN_CC_DP4A) {
+ mmq_x = MMQ_X_Q3_K_PASCAL;
+ mmq_y = MMQ_Y_Q3_K_PASCAL;
+ nwarps = NWARPS_Q3_K_PASCAL;
} else {
- const int mmq_x = 64;
- const int mmq_y = 64;
- const int nwarps = 8;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
- load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
- load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ GGML_ASSERT(false);
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const dim3 block_nums(block_num_x, block_num_y, 1);
+ const dim3 block_dims(WARP_SIZE, nwarps, 1);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ mul_mat_q3_K<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+ } else {
+ const bool need_check = true;
+ mul_mat_q3_K<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
}
CUDA_CHECK(cudaGetDevice(&id));
const int compute_capability = g_compute_capabilities[id];
+ int mmq_x, mmq_y, nwarps;
if (compute_capability >= CC_TURING) {
- const int mmq_x = 64;
- const int mmq_y = 128;
- const int nwarps = 4;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
- load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
- load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ mmq_x = MMQ_X_Q4_K_AMPERE;
+ mmq_y = MMQ_Y_Q4_K_AMPERE;
+ nwarps = NWARPS_Q4_K_AMPERE;
+ } else if (compute_capability >= MIN_CC_DP4A) {
+ mmq_x = MMQ_X_Q4_K_PASCAL;
+ mmq_y = MMQ_Y_Q4_K_PASCAL;
+ nwarps = NWARPS_Q4_K_PASCAL;
} else {
- const int mmq_x = 32;
- const int mmq_y = 64;
- const int nwarps = 8;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
- load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
- load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ GGML_ASSERT(false);
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const dim3 block_nums(block_num_x, block_num_y, 1);
+ const dim3 block_dims(WARP_SIZE, nwarps, 1);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ mul_mat_q4_K<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+ } else {
+ const bool need_check = true;
+ mul_mat_q4_K<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
}
CUDA_CHECK(cudaGetDevice(&id));
const int compute_capability = g_compute_capabilities[id];
+ int mmq_x, mmq_y, nwarps;
if (compute_capability >= CC_TURING) {
- const int mmq_x = 64;
- const int mmq_y = 128;
- const int nwarps = 4;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
- load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
- load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ mmq_x = MMQ_X_Q5_K_AMPERE;
+ mmq_y = MMQ_Y_Q5_K_AMPERE;
+ nwarps = NWARPS_Q5_K_AMPERE;
+ } else if (compute_capability >= MIN_CC_DP4A) {
+ mmq_x = MMQ_X_Q5_K_PASCAL;
+ mmq_y = MMQ_Y_Q5_K_PASCAL;
+ nwarps = NWARPS_Q5_K_PASCAL;
} else {
- const int mmq_x = 64;
- const int mmq_y = 64;
- const int nwarps = 8;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
- load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
- load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ GGML_ASSERT(false);
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const dim3 block_nums(block_num_x, block_num_y, 1);
+ const dim3 block_dims(WARP_SIZE, nwarps, 1);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ mul_mat_q5_K<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+ } else {
+ const bool need_check = true;
+ mul_mat_q5_K<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
}
CUDA_CHECK(cudaGetDevice(&id));
const int compute_capability = g_compute_capabilities[id];
+ int mmq_x, mmq_y, nwarps;
if (compute_capability >= CC_TURING) {
- const int mmq_x = 64;
- const int mmq_y = 64;
- const int nwarps = 4;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
- load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
- load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ mmq_x = MMQ_X_Q6_K_AMPERE;
+ mmq_y = MMQ_Y_Q6_K_AMPERE;
+ nwarps = NWARPS_Q6_K_AMPERE;
+ } else if (compute_capability >= MIN_CC_DP4A) {
+ mmq_x = MMQ_X_Q6_K_PASCAL;
+ mmq_y = MMQ_Y_Q6_K_PASCAL;
+ nwarps = NWARPS_Q6_K_PASCAL;
} else {
- const int mmq_x = 32;
- const int mmq_y = 64;
- const int nwarps = 8;
-
- const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
- const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
- const dim3 block_nums(block_num_x, block_num_y, 1);
- const dim3 block_dims(WARP_SIZE, nwarps, 1);
-
- if (nrows_x % mmq_y == 0) {
- const bool need_check = false;
- mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
- load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- } else {
- const bool need_check = true;
- mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
- load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
- <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
- }
+ GGML_ASSERT(false);
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const dim3 block_nums(block_num_x, block_num_y, 1);
+ const dim3 block_dims(WARP_SIZE, nwarps, 1);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ mul_mat_q6_K<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+ } else {
+ const bool need_check = true;
+ mul_mat_q6_K<need_check><<<block_nums, block_dims, 0, stream>>>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
}
}