// copy destination pointers to GPU
CUDA_CHECK(cudaMemcpyAsync(cuda_graph->dest_ptrs_d, host_dest_ptrs, host_dest_ptrs_size*sizeof(char *), cudaMemcpyHostToDevice, stream));
cuda_graph->graph_cpynode_index = 0; // reset index
+#else
+ GGML_UNUSED(cuda_graph); GGML_UNUSED(host_dest_ptrs);
+ GGML_UNUSED(host_dest_ptrs_size); GGML_UNUSED(stream);
#endif
}
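// The #else branch above keeps the build warning-free when CUDA graphs are compiled out:
// with an empty function body, every parameter would otherwise trip -Wunused-parameter,
// so each one is cast to void. A minimal sketch of the same idiom with hypothetical
// names (only the void-cast pattern mirrors GGML_UNUSED):
#define EXAMPLE_UNUSED(x) (void)(x)

static void example_set_dest_ptrs(char ** dest_ptrs_d, char ** host_ptrs, int n_ptrs) {
#ifdef EXAMPLE_USE_CUDA_GRAPHS
    // ... copy host_ptrs[0..n_ptrs) into dest_ptrs_d on the device ...
#else
    // feature compiled out: reference every parameter so the compiler stays quiet
    EXAMPLE_UNUSED(dest_ptrs_d); EXAMPLE_UNUSED(host_ptrs); EXAMPLE_UNUSED(n_ptrs);
#endif
}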
T sum = 0.0f;
#pragma unroll
- for (int k_KQ_0 = 0; k_KQ_0 < D/sizeof(int); k_KQ_0 += warp_size) {
+ for (int k_KQ_0 = 0; k_KQ_0 < int(D/sizeof(int)); k_KQ_0 += warp_size) {
const int k_KQ = k_KQ_0 + threadIdx.x;
const int ib = k_KQ / QI8_1;
T sum = 0.0f;
#pragma unroll
- for (int k_KQ_0 = 0; k_KQ_0 < D/sizeof(int); k_KQ_0 += warp_size) {
+ for (int k_KQ_0 = 0; k_KQ_0 < int(D/sizeof(int)); k_KQ_0 += warp_size) {
const int k_KQ = k_KQ_0 + threadIdx.x;
const int ib = k_KQ / QI8_1;
T sum = 0.0f;
#pragma unroll
- for (int k_KQ_0 = 0; k_KQ_0 < D/sizeof(int); k_KQ_0 += warp_size) {
+ for (int k_KQ_0 = 0; k_KQ_0 < int(D/sizeof(int)); k_KQ_0 += warp_size) {
const int k_KQ = k_KQ_0 + threadIdx.x;
const int ib = k_KQ / QI8_1;
T sum = 0.0f;
#pragma unroll
- for (int k_KQ_0 = 0; k_KQ_0 < D/sizeof(int); k_KQ_0 += warp_size) {
+ for (int k_KQ_0 = 0; k_KQ_0 < int(D/sizeof(int)); k_KQ_0 += warp_size) {
const int k_KQ = k_KQ_0 + threadIdx.x;
const int ib = k_KQ / QI8_1;
T sum = 0.0f;
#pragma unroll
- for (int k_KQ_0 = 0; k_KQ_0 < D/sizeof(int); k_KQ_0 += warp_size) {
+ for (int k_KQ_0 = 0; k_KQ_0 < int(D/sizeof(int)); k_KQ_0 += warp_size) {
const int k_KQ = k_KQ_0 + threadIdx.x;
const int ib = k_KQ / QI8_0;
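// Every loop-bound change above fixes the same mixed-signedness comparison: D is a signed
// template parameter, but sizeof(int) is size_t, so D/sizeof(int) promotes the bound to an
// unsigned type while the counter k_KQ_0 stays signed, which compilers flag (e.g. -Wsign-compare).
// Wrapping the bound in int(...) keeps both sides of the comparison signed. A reduced sketch
// with illustrative names:
template <int D>
void example_signed_bound(int warp_size) {
    // Before: k_KQ_0 < D/sizeof(int) compares int against size_t and may warn.
    // After:  casting the bound back to int makes the comparison signed on both sides.
    for (int k_KQ_0 = 0; k_KQ_0 < int(D/sizeof(int)); k_KQ_0 += warp_size) {
        // ... loop body elided ...
    }
}
template void example_signed_bound<128>(int); // explicit instantiation, e.g. head size 128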
return;
#endif // FP16_MMA_AVAILABLE
if (use_logit_softcap && !(D == 128 || D == 256)) {
+ GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask);
+ GGML_UNUSED(dst); GGML_UNUSED(dst_meta); GGML_UNUSED(scale);
+ GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1);
+ GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap);
+ GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02);
+ GGML_UNUSED(ne03); GGML_UNUSED(ne10); GGML_UNUSED(ne11);
+ GGML_UNUSED(ne12); GGML_UNUSED(ne13); GGML_UNUSED(ne31);
+ GGML_UNUSED(nb31); GGML_UNUSED(nb01); GGML_UNUSED(nb02);
+ GGML_UNUSED(nb03); GGML_UNUSED(nb11); GGML_UNUSED(nb12);
+ GGML_UNUSED(nb13); GGML_UNUSED(nb21); GGML_UNUSED(nb22);
+ GGML_UNUSED(nb23); GGML_UNUSED(ne0); GGML_UNUSED(ne1);
+ GGML_UNUSED(ne2); GGML_UNUSED(ne3);
NO_DEVICE_CODE;
return;
}
// Skip unused kernel variants for faster compilation:
if (use_logit_softcap && !(D == 128 || D == 256)) {
+ GGML_UNUSED(Q); GGML_UNUSED(K); GGML_UNUSED(V); GGML_UNUSED(mask);
+ GGML_UNUSED(dst); GGML_UNUSED(dst_meta); GGML_UNUSED(scale);
+ GGML_UNUSED(max_bias); GGML_UNUSED(m0); GGML_UNUSED(m1);
+ GGML_UNUSED(n_head_log2); GGML_UNUSED(logit_softcap);
+ GGML_UNUSED(ne00); GGML_UNUSED(ne01); GGML_UNUSED(ne02);
+ GGML_UNUSED(ne03); GGML_UNUSED(ne10); GGML_UNUSED(ne11);
+ GGML_UNUSED(ne12); GGML_UNUSED(ne13); GGML_UNUSED(ne31);
+ GGML_UNUSED(nb31); GGML_UNUSED(nb01); GGML_UNUSED(nb02);
+ GGML_UNUSED(nb03); GGML_UNUSED(nb11); GGML_UNUSED(nb12);
+ GGML_UNUSED(nb13); GGML_UNUSED(nb21); GGML_UNUSED(nb22);
+ GGML_UNUSED(nb23); GGML_UNUSED(ne0); GGML_UNUSED(ne1);
+ GGML_UNUSED(ne2); GGML_UNUSED(ne3);
NO_DEVICE_CODE;
return;
}
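// Both kernel variants above use the same stub shape: a template combination that is
// intentionally skipped keeps its signature so the instantiation still compiles, but the
// body collapses to the NO_DEVICE_CODE stub, and every argument is marked unused so the
// device compiler does not warn about dead parameters. A reduced sketch of that shape with
// hypothetical names (plain void casts stand in for GGML_UNUSED, and the sketch simply
// returns where the real code also expands NO_DEVICE_CODE):
template <int D, bool use_logit_softcap>
__global__ void example_attn_kernel(const float * Q, const float * K, float * dst, float scale) {
    if (use_logit_softcap && !(D == 128 || D == 256)) {
        // unsupported variant: compiled only to satisfy the instantiation, never launched
        (void) Q; (void) K; (void) dst; (void) scale;
        return;
    }
    // ... real kernel body for the supported head sizes would go here ...
}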
// Set memory to zero if out of bounds:
if (ncols > 2 && ic0 + j >= ne01) {
#pragma unroll
- for (int i0 = 0; i0 < D/sizeof(int); i0 += WARP_SIZE) {
+ for (int i0 = 0; i0 < int(D/sizeof(int)); i0 += WARP_SIZE) {
const int i = i0 + threadIdx.x;
tmp_q_i32[i] = 0;
const float * Q_f = (const float *) (Q + j*nb01);
#pragma unroll
- for (int i0 = 0; i0 < D/sizeof(int); i0 += WARP_SIZE) {
+ for (int i0 = 0; i0 < int(D/sizeof(int)); i0 += WARP_SIZE) {
quantize_q8_1_to_shared<float2>(Q_f + 4*i0, scale, tmp_q_i32, tmp_q_ds);
}
}
float2 * tmp_q_ds = (float2 *) (tmp_q_i32 + D/sizeof(int));
#pragma unroll
- for (int i0 = 0; i0 < D/sizeof(int); i0 += WARP_SIZE) {
+ for (int i0 = 0; i0 < int(D/sizeof(int)); i0 += WARP_SIZE) {
const int i = i0 + threadIdx.x;
Q_i32[j][i0/WARP_SIZE] = tmp_q_i32[i];
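// The float2 cast above assumes one shared buffer holds both halves of the quantized Q row:
// first the D/sizeof(int) packed int32 values (8-bit quants, 4 per int), then the per-block
// scale/sum pairs directly behind them. A minimal layout sketch under that assumption
// (names are illustrative, and the fill step stands in for quantize_q8_1_to_shared):
template <int D>
__device__ void example_q8_row_layout(int * shmem_row) {
    int    * q_vals = shmem_row;                           // packed quantized values, 4 per int32
    float2 * q_ds   = (float2 *) (q_vals + D/sizeof(int)); // per-block (scale, sum) pairs
    // ... a real kernel would quantize one row of Q into q_vals and q_ds here ...
    (void) q_ds;
}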