*s = sumf;
#else
- const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243};
-
- float sumf = 0.0f;
-
- for (int i = 0; i < nb; ++i) {
- int sum = 0;
-
- for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) {
- for (size_t l = 0; l < 5; ++l) {
- for (size_t m = 0; m < 32; ++m) {
- uint8_t q = x[i].qs[j + m] * pow3[l];
- uint16_t xi = ((uint16_t) q * 3) >> 8;
- sum += (xi - 1) * y[i].qs[j*5 + l*32 + m];
- }
- }
- }
- for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) {
- for (size_t l = 0; l < 5; ++l) {
- for (size_t m = 0; m < 16; ++m) {
- uint8_t q = x[i].qs[j + m] * pow3[l];
- uint16_t xi = ((uint16_t) q * 3) >> 8;
- sum += (xi - 1) * y[i].qs[j*5 + l*16 + m];
- }
- }
- }
-
- for (size_t l = 0; l < 4; ++l) {
- for (size_t j = 0; j < sizeof(x->qh); ++j) {
- uint8_t q = x[i].qh[j] * pow3[l];
- uint16_t xi = ((uint16_t) q * 3) >> 8;
- sum += (xi - 1) * y[i].qs[sizeof(x->qs)*5 + l*sizeof(x->qh) + j];
- }
- }
-
- sumf += (float) sum * (GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d);
- }
-
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_tq1_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
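// Illustrative sketch (not part of this diff): the scalar TQ1_0 path removed above
// stores up to five ternary digits per byte as a fixed-point base-3 fraction, so
// multiplying by 3^l wraps mod 256 and discards the l digits already consumed, and
// ((q * 3) >> 8) produces the next digit. The helper name below is hypothetical and
// only restates that trick in isolation.
static inline int tq1_0_trit_sketch(uint8_t byte, int l) {  // l in [0, 4]
    static const uint8_t pow3[5] = {1, 3, 9, 27, 81};
    const uint8_t q = byte * pow3[l];            // wraps mod 256, dropping extracted digits
    return (int)(((uint16_t) q * 3) >> 8) - 1;   // next base-3 digit, remapped to {-1, 0, 1}
}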
*s = sumf;
#else
- float sumf = 0.0f;
-
- for (int i = 0; i < nb; ++i) {
- int32_t sumi = 0;
-
- for (size_t j = 0; j < sizeof(x->qs); j += 32) {
- for (size_t l = 0; l < 4; ++l) {
- for (size_t k = 0; k < 32; ++k) {
- sumi += y[i].qs[j*4 + l*32 + k] * (((x[i].qs[j + k] >> (l*2)) & 3) - 1);
- }
- }
- }
-
- const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
-
- sumf += (float) sumi * d;
- }
-
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_tq2_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sum;
#else
-
- float sumf = 0;
-
- for (int i = 0; i < nb; ++i) {
-
- const uint8_t * q2 = x[i].qs;
- const int8_t * q8 = y[i].qs;
- const uint8_t * sc = x[i].scales;
-
- int summs = 0;
- for (int j = 0; j < 16; ++j) {
- summs += y[i].bsums[j] * (sc[j] >> 4);
- }
-
- const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
- const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
-
- int isum = 0;
- int is = 0;
- int d;
- for (int k = 0; k < QK_K/128; ++k) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- d = sc[is++] & 0xF;
- int isuml = 0;
- for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- d = sc[is++] & 0xF;
- isuml = 0;
- for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- shift += 2;
- q8 += 32;
- }
- q2 += 32;
- }
- sumf += dall * isum - dmin * summs;
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sum;
#else
- // scalar version
- // This function is written like this so the compiler can manage to vectorize most of it
- // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
- // manually vectorized version above. Every other version I tried would run at least 4 times slower.
- // The ideal situation would be if we could just write the code once, and the compiler would
- // automatically produce the best possible set of machine instructions, instead of us having to manually
- // write vectorized versions for AVX, ARM_NEON, etc.
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- uint32_t auxs[4];
- const int8_t * scales = (const int8_t*)auxs;
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q3 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].hmask;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- q3 += 32;
- }
- a = aux8;
-
- memcpy(auxs, x[i].scales, 12);
- uint32_t tmp = auxs[2];
- auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
- auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
- auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
- auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
-
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sumf;
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- a += 32;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- a += 32; q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sumf;
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
}
*s = sum;
#else
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].ql;
- const uint8_t * GGML_RESTRICT qh = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- }
- a += 128;
- q4 += 64;
- qh += 32;
- }
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- int scale = x[i].scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = 0.25f * sumf;
#else
-
- uint32_t aux32[2];
- const uint8_t * aux8 = (const uint8_t *)aux32;
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * GGML_RESTRICT q2 = x[i].qs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(aux32, q2, 2*sizeof(uint32_t));
- q2 += 4;
- const uint32_t ls = 2*(aux32[1] >> 28) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
- const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls;
- }
- sumf += d * bsum;
- }
- *s = 0.125f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = 0.125f * sumf;
#else
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * GGML_RESTRICT q2 = x[i].qs;
- const uint8_t * GGML_RESTRICT sc = x[i].scales;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
- const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 2; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls1;
- sumi = 0;
- for (int l = 2; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls2;
- q2 += 4;
- }
- sumf += d * bsum;
- }
- *s = 0.125f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = 0.125f * sumf;
#else
-
- float sumf = 0;
- for (int i = 0; i < nb; i++) {
-
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint8_t * qh = x[i].qh;
- const uint8_t * signs = qs + QK_K/8;
-
- int bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf);
- int ls2 = 1 + 2*(x[i].scales[ib32] >> 4);
- int sumi1 = 0, sumi2 = 0;
- for (int l = 0; l < 2; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
- for (int j = 0; j < 8; ++j) {
- sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- for (int l = 2; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
- for (int j = 0; j < 8; ++j) {
- sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += ls1 * sumi1 + ls2 * sumi2;
- qs += 4;
- signs += 4;
- }
-
- sumf += d * bsum;
- }
-
- *s = 0.125f * sumf;
-
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = 0.5f * sumf;
#else
-
- uint32_t aux32;
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * GGML_RESTRICT q3 = x[i].qs;
- const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
- const uint32_t ls = 2*(aux32 >> 28) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
- const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
- const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- q3 += 8;
- bsum += sumi * ls;
- }
- sumf += d * bsum;
- }
- *s = 0.25f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq3_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sumf;
#else
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * GGML_RESTRICT qs = x[i].qs;
- const uint8_t * GGML_RESTRICT qh = x[i].qh;
- const uint8_t * GGML_RESTRICT signs = x[i].signs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
- const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1;
- const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256)));
- const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256)));
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- qs += 8;
- signs += 4;
- bsum += sumi * ls1;
- sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256)));
- const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256)));
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- qs += 8;
- signs += 4;
- bsum += sumi * ls2;
- }
- sumf += d * bsum;
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq3_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sumf;
#else
-
- float sumf = 0;
- for (int i = 0; i < nb; i++) {
-
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint16_t * qh = x[i].qh;
-
- int sumi = 0, sumi1 = 0;
- for (int ib = 0; ib < QK_K/32; ++ib) {
- const int ls = 2*((qh[ib] >> 12) & 7) + 1;
- const int delta = qh[ib] & 0x8000 ? -1 : 1;
- int lsum = 0;
- for (int l = 0; l < 4; ++l) {
- const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
- for (int j = 0; j < 8; ++j) {
- lsum += q8[j] * grid[j];
- }
- q8 += 8;
- }
- sumi += ls * lsum;
- sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]);
- qs += 4;
- }
-
- sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1);
- }
-
- *s = sumf;
-
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq1_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sumf;
#else
-
- int sum1[2], sum2[2], delta[4];
-
- float sumf = 0;
- for (int i = 0; i < nb; i++) {
-
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint8_t * qh = x[i].qh;
- const uint16_t * sc = (const uint16_t *)x[i].scales;
-
- scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
-
- int sumi1 = 0, sumi2 = 0;
- for (int ib = 0; ib < QK_K/32; ++ib) {
- delta[0] = qh[0] & 0x08 ? -1 : 1;
- delta[1] = qh[0] & 0x80 ? -1 : 1;
- delta[2] = qh[1] & 0x08 ? -1 : 1;
- delta[3] = qh[1] & 0x80 ? -1 : 1;
- sum1[0] = sum1[1] = sum2[0] = sum2[1] = 0;
- for (int l = 0; l < 4; ++l) {
- const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((uint16_t)qh[l/2] << (8 - 4*(l%2))) & 0x700)));
- int lsum1 = 0, lsum2 = 0;
- for (int j = 0; j < 8; ++j) {
- lsum1 += q8[j] * grid[j];
- lsum2 += q8[j];
- }
- q8 += 8;
- sum1[l/2] += lsum1;
- sum2[l/2] += lsum2*delta[l];
- }
-
- const int ls1 = 2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1;
- const int ls2 = 2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1;
-
- sumi1 += sum1[0] * ls1 + sum1[1] * ls2;
- sumi2 += sum2[0] * ls1 + sum2[1] * ls2;
- qs += 4;
- qh += 2;
- }
-
- sumf += GGML_CPU_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2);
- }
-
- *s = sumf;
-
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(scale);
+ ggml_vec_dot_iq1_m_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sumf;
#else
- float sumf = 0;
- for (int ibl = 0; ibl < nb; ++ibl) {
- const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
- uint16_t h = x[ibl].scales_h;
- const uint8_t * qs = x[ibl].qs;
- const int8_t * q8 = y[ibl].qs;
- for (int ib = 0; ib < QK_K/32; ib += 2) {
- const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
- const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30);
- h >>= 4;
- const float d1 = d4d8*(ls1 - 32);
- const float d2 = d4d8*(ls2 - 32);
- int sumi1 = 0, sumi2 = 0;
- for (int j = 0; j < 16; ++j) {
- sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
- sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
- }
- sumf += d1 * (sumi1 + sumi2);
- qs += 16;
- q8 += 32;
- sumi1 = sumi2 = 0;
- for (int j = 0; j < 16; ++j) {
- sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
- sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
- }
- sumf += d2 * (sumi1 + sumi2);
- qs += 16;
- q8 += 32;
- }
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
}
}
#else
- // scalar
- const int blck_size_interleave = 4;
- float srcv[4][QK8_0];
- float id[4];
-
- for (int i = 0; i < nb; i++) {
- for (int row_iter = 0; row_iter < 4; row_iter++) {
- float amax = 0.0f; // absolute max
-
- for (int j = 0; j < QK8_0; j++) {
- srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j];
- amax = MAX(amax, fabsf(srcv[row_iter][j]));
- }
-
- const float d = amax / ((1 << 7) - 1);
- id[row_iter] = d ? 1.0f / d : 0.0f;
-
- y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d);
- }
-
- for (int j = 0; j < QK8_0 * 4; j++) {
- int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave;
- int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave;
- src_offset += (j % blck_size_interleave);
-
- float x0 = srcv[src_id][src_offset] * id[src_id];
- y[i].qs[j] = roundf(x0);
- }
- }
+ UNUSED(nb);
+ UNUSED(y);
+ ggml_quantize_mat_q8_0_4x4_generic(x, vy, k);
#endif
}
}
#else
- // scalar
- const int blck_size_interleave = 8;
- float srcv[4][QK8_0];
- float id[4];
-
- for (int i = 0; i < nb; i++) {
- for (int row_iter = 0; row_iter < 4; row_iter++) {
- float amax = 0.0f; // absolute max
-
- for (int j = 0; j < QK8_0; j++) {
- srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j];
- amax = MAX(amax, fabsf(srcv[row_iter][j]));
- }
-
- const float d = amax / ((1 << 7) - 1);
- id[row_iter] = d ? 1.0f / d : 0.0f;
-
- y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d);
- }
-
- for (int j = 0; j < QK8_0 * 4; j++) {
- int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave;
- int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave;
- src_offset += (j % blck_size_interleave);
-
- float x0 = srcv[src_id][src_offset] * id[src_id];
- y[i].qs[j] = roundf(x0);
- }
- }
+ UNUSED(nb);
+ UNUSED(y);
+ ggml_quantize_mat_q8_0_4x8_generic(x, vy, k);
#endif
}
}
return;
#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
- float sumf[4];
- int sumi;
-
- const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb);
-
- for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0;
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4;
- }
- sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d);
- }
- }
- }
- for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j];
- }
+ ggml_gemv_q4_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
}
return;
#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
- float sumf[4];
- int sumi;
-
- const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb);
-
- for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0;
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4;
- }
- sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d);
- }
- }
- }
- for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j];
- }
+ ggml_gemv_q4_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
#endif // #if defined(__ARM_FEATURE_SVE)
#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
- {
- float sumf[8];
- int sumi;
-
- const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb);
-
- for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0;
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4;
- }
- sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d);
- }
- }
- }
- for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j];
- }
- }
+ ggml_gemv_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
}
return;
#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON)
- {
- float sumf[4];
- int sumi;
-
- const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb);
-
- for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0;
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F];
- const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4];
- sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2]));
- }
- sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d);
- }
- }
- }
- for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j];
- }
- }
+ ggml_gemv_iq4_nl_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
);
return;
#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON)
- {
- float sumf[4][4];
- int sumi;
-
- for (int y = 0; y < nr / 4; y++) {
- const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb);
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0;
- }
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) +
- (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4;
- }
- sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]);
- }
- }
- }
- }
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++)
- s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j];
- }
- }
- }
- }
+ ggml_gemm_q4_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
);
return;
#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
- float sumf[4][4];
- int sumi;
-
- for (int y = 0; y < nr / 4; y++) {
- const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx + (x * nb);
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0;
- }
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) +
- (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4;
- }
- sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]);
- }
- }
- }
- }
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++)
- s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j];
- }
- }
- }
+ ggml_gemm_q4_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
#endif // #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8)
#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
- float sumf[4][8];
- int sumi;
-
- for (int y = 0; y < nr / 4; y++) {
- const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb);
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0;
- }
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) +
- (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4;
- }
- sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]);
- }
- }
- }
- }
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++)
- s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j];
- }
- }
- }
+ ggml_gemm_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
}
return;
#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON)
- {
- float sumf[4][4];
- int sumi;
-
- for (int y = 0; y < nr / 4; y++) {
- const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb);
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0;
- }
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0x0F];
- const int v1 = kvalues_iq4nl[b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4];
- sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) +
- (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4]));
- }
- sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]);
- }
- }
- }
- }
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++)
- s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j];
- }
- }
- }
- }
+ ggml_gemm_iq4_nl_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
sumf = hsum_float_8(acc) + summs;
-#endif
- for (; ib < nb; ++ib) {
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[ib].qs[j] & 0x0F);
- const int v1 = (x[ib].qs[j] >> 4);
-
- sumi0 += (v0 * y[ib].qs[j]);
- sumi1 += (v1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(ib);
+ UNUSED(sumf);
+ ggml_vec_dot_q4_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf = hsum_float_8(acc);
-#endif
- for (; ib < nb; ++ib) {
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
-
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
- const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
-
- const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16);
- const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16);
-
- sumi0 += (x0 * y[ib].qs[j]);
- sumi1 += (x1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi;
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(ib);
+ UNUSED(sumf);
+ UNUSED(x);
+ UNUSED(y);
+ ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
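// Illustrative sketch (not part of this diff): the scalar q5_0 tail removed above
// rebuilds each 5-bit value by taking bit j of qh as bit 4 of the low-nibble element
// and bit j+16 as bit 4 of the high-nibble element, then re-centering by -16. The
// helper name and signature here are hypothetical.
static inline void q5_0_unpack_pair_sketch(uint8_t qs_j, uint32_t qh, int j, int8_t * lo, int8_t * hi) {
    const uint8_t xh_0 = ((qh >> (j +  0)) & 1u) << 4;  // high bit for the low-nibble element
    const uint8_t xh_1 = ((qh >> (j + 16)) & 1u) << 4;  // high bit for the high-nibble element
    *lo = (int8_t)(((qs_j & 0x0F) | xh_0) - 16);
    *hi = (int8_t)(((qs_j >>   4) | xh_1) - 16);
}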
void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf = hsum_float_8(acc) + summs;
-#endif
- for (; ib < nb; ++ib) {
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
-
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
-
- const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0;
- const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1;
-
- sumi0 += (x0 * y[ib].qs[j]);
- sumi1 += (x1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(ib);
+ UNUSED(sumf);
+ UNUSED(x);
+ UNUSED(y);
+ ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf = hsum_float_8(acc);
-#endif
- for (; ib < nb; ++ib) {
- int sumi = 0;
-
- for (int j = 0; j < qk; j++) {
- sumi += x[ib].qs[j]*y[ib].qs[j];
- }
-
- sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d));
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(ib);
+ UNUSED(sumf);
+ UNUSED(x);
+ UNUSED(y);
+ ggml_vec_dot_q8_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = hsum_float_8(acc);
#else
-
- float sumf = 0;
-
- for (int i = 0; i < nb; ++i) {
-
- const uint8_t * q2 = x[i].qs;
- const int8_t * q8 = y[i].qs;
- const uint8_t * sc = x[i].scales;
-
- int summs = 0;
- for (int j = 0; j < 16; ++j) {
- summs += y[i].bsums[j] * (sc[j] >> 4);
- }
-
- const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
- const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
-
- int isum = 0;
- int is = 0;
- int d;
- for (int k = 0; k < QK_K/128; ++k) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- d = sc[is++] & 0xF;
- int isuml = 0;
- for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- d = sc[is++] & 0xF;
- isuml = 0;
- for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- shift += 2;
- q8 += 32;
- }
- q2 += 32;
- }
- sumf += dall * isum - dmin * summs;
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(acc);
#else
- // scalar version
- // This function is written like this so the compiler can manage to vectorize most of it
- // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
- // manually vectorized version above. Every other version I tried would run at least 4 times slower.
- // The ideal situation would be if we could just write the code once, and the compiler would
- // automatically produce the best possible set of machine instructions, instead of us having to manually
- // write vectorized versions for AVX, ARM_NEON, etc.
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- uint32_t auxs[4];
- const int8_t * scales = (const int8_t*)auxs;
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q3 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].hmask;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- q3 += 32;
- }
- a = aux8;
-
- memcpy(auxs, x[i].scales, 12);
- uint32_t tmp = auxs[2];
- auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
- auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
- auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
- auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
-
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
-
}
void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = hsum_float_8(acc) + ((v4f32)acc_m)[0];
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- a += 32;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- a += 32; q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(acc) + ((v4f32)acc_m)[0];
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(acc);
#else
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].ql;
- const uint8_t * GGML_RESTRICT qh = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- }
- a += 128;
- q4 += 64;
- qh += 32;
- }
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- int scale = x[i].scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = 0.125f * hsum_float_8(accumf);
#else
-
- uint32_t aux32[2];
- const uint8_t * aux8 = (const uint8_t *)aux32;
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * GGML_RESTRICT q2 = x[i].qs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(aux32, q2, 2*sizeof(uint32_t));
- q2 += 4;
- const uint32_t ls = 2*(aux32[1] >> 28) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
- const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls;
- }
- sumf += d * bsum;
- }
- *s = 0.125f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = 0.125f * hsum_float_8(accumf);
#else
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * GGML_RESTRICT q2 = x[i].qs;
- const uint8_t * GGML_RESTRICT sc = x[i].scales;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
- const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 2; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls1;
- sumi = 0;
- for (int l = 2; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls2;
- q2 += 4;
- }
- sumf += d * bsum;
- }
- *s = 0.125f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = 0.125f * hsum_float_8(accumf);
#else
-
- float sumf = 0;
- for (int i = 0; i < nb; i++) {
-
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint8_t * qh = x[i].qh;
- const uint8_t * signs = qs + QK_K/8;
-
- int bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf);
- int ls2 = 1 + 2*(x[i].scales[ib32] >> 4);
- int sumi1 = 0, sumi2 = 0;
- for (int l = 0; l < 2; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
- for (int j = 0; j < 8; ++j) {
- sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- for (int l = 2; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
- for (int j = 0; j < 8; ++j) {
- sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += ls1 * sumi1 + ls2 * sumi2;
- qs += 4;
- signs += 4;
- }
-
- sumf += d * bsum;
- }
-
- *s = 0.125f * sumf;
-
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
-
}
void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = 0.25f * hsum_float_8(accumf);
#else
-
- uint32_t aux32;
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * GGML_RESTRICT q3 = x[i].qs;
- const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
- const uint32_t ls = 2*(aux32 >> 28) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
- const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
- const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- q3 += 8;
- bsum += sumi * ls;
- }
- sumf += d * bsum;
- }
- *s = 0.25f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq3_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(accumf);
#else
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * GGML_RESTRICT qs = x[i].qs;
- const uint8_t * GGML_RESTRICT qh = x[i].qh;
- const uint8_t * GGML_RESTRICT signs = x[i].signs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
- const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1;
- const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256)));
- const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256)));
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- qs += 8;
- signs += 4;
- bsum += sumi * ls1;
- sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256)));
- const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256)));
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- qs += 8;
- signs += 4;
- bsum += sumi * ls2;
- }
- sumf += d * bsum;
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq3_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(accum) + IQ1S_DELTA * accum1;
#else
-
- float sumf = 0;
- for (int i = 0; i < nb; i++) {
-
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint16_t * qh = x[i].qh;
-
- int sumi = 0, sumi1 = 0;
- for (int ib = 0; ib < QK_K/32; ++ib) {
- const int ls = 2*((qh[ib] >> 12) & 7) + 1;
- const int delta = qh[ib] & 0x8000 ? -1 : 1;
- int lsum = 0;
- for (int l = 0; l < 4; ++l) {
- const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
- for (int j = 0; j < 8; ++j) {
- lsum += q8[j] * grid[j];
- }
- q8 += 8;
- }
- sumi += ls * lsum;
- sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]);
- qs += 4;
- }
-
- sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1);
- }
-
- *s = sumf;
-
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq1_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(accum);
#else
- float sumf = 0;
- for (int ibl = 0; ibl < nb; ++ibl) {
- const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
- uint16_t h = x[ibl].scales_h;
- const uint8_t * qs = x[ibl].qs;
- const int8_t * q8 = y[ibl].qs;
- for (int ib = 0; ib < QK_K/32; ib += 2) {
- const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
- const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30);
- h >>= 4;
- const float d1 = d4d8*(ls1 - 32);
- const float d2 = d4d8*(ls2 - 32);
- int sumi1 = 0, sumi2 = 0;
- for (int j = 0; j < 16; ++j) {
- sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
- sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
- }
- sumf += d1 * (sumi1 + sumi2);
- qs += 16;
- q8 += 32;
- sumi1 = sumi2 = 0;
- for (int j = 0; j < 16; ++j) {
- sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
- sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
- }
- sumf += d2 * (sumi1 + sumi2);
- qs += 16;
- q8 += 32;
- }
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
sumf = vec_extract(vsumf0, 0);
-#endif
- for (; ib < nb; ++ib) {
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[ib].qs[j] & 0x0F) - 8;
- const int v1 = (x[ib].qs[j] >> 4) - 8;
-
- sumi0 += (v0 * y[ib].qs[j]);
- sumi1 += (v1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d);
- }
-
*s = sumf;
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(ib);
+ UNUSED(sumf);
+ ggml_vec_dot_q4_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf = vec_extract(vsumf0, 0);
-#endif
- for (; ib < nb; ++ib) {
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[ib].qs[j] & 0x0F);
- const int v1 = (x[ib].qs[j] >> 4);
-
- sumi0 += (v0 * y[ib].qs[j]);
- sumi1 += (v1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
- }
-
*s = sumf;
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(ib);
+ UNUSED(sumf);
+ ggml_vec_dot_q4_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf = vec_extract(vsumf0, 0);
-#endif
- for (; ib < nb; ++ib) {
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
-
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
- const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
-
- const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16);
- const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16);
-
- sumi0 += (x0 * y[ib].qs[j]);
- sumi1 += (x1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi;
- }
-
*s = sumf;
+#else
+ UNUSED(ib);
+ UNUSED(sumf);
+ UNUSED(x);
+ UNUSED(y);
+ ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
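For reference, the scalar tail removed above reconstructs each q5_0 value by pulling its fifth bit out of the 32-bit qh field and recentring by 16. A self-contained sketch of that step for the low-nibble element at position j (the high-nibble element uses bit j+16 of qh the same way); q5_0_value_lo is a hypothetical helper, not a function from the tree:

    #include <stdint.h>

    static int8_t q5_0_value_lo(uint8_t qs_byte, uint32_t qh, int j) {
        const uint8_t xh_0 = (uint8_t)(((qh >> j) & 1u) << 4);   /* bit j of qh becomes bit 4 */
        return (int8_t)(((qs_byte & 0x0F) | xh_0) - 16);         /* 5-bit value in [-16, 15]  */
    }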
void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf = vec_extract(vsumf0, 0);
-#endif
- for (; ib < nb; ++ib) {
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
-
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
-
- const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0;
- const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1;
-
- sumi0 += (x0 * y[ib].qs[j]);
- sumi1 += (x1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(ib);
+ UNUSED(sumf);
+ UNUSED(x);
+ UNUSED(y);
+ ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf = vec_extract(vsumf0, 0);
-#endif
- for (; ib < nb; ++ib) {
- int sumi = 0;
-
- for (int j = 0; j < qk; j++) {
- sumi += x[ib].qs[j]*y[ib].qs[j];
- }
-
- sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d));
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(ib);
+ UNUSED(sumf);
+ ggml_vec_dot_q8_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
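The UNUSED() casts in the new fallback branches keep the locals that are still declared for the SIMD path (nb, ib, sumf, the block pointers) from triggering unused-variable warnings. A minimal stand-in definition, assuming it matches ggml's usual (void) cast helper:

    #define UNUSED(x) (void)(x)

    /* usage mirroring the fallback branches above */
    static void silence_unused_example(int nb) {
        int   ib   = 0;      /* only consumed by the vectorized path */
        float sumf = 0.0f;
        UNUSED(nb);
        UNUSED(ib);
        UNUSED(sumf);
    }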
void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = vec_extract(vsumf0, 0);
#else
-
- float sumf = 0;
-
- for (int i = 0; i < nb; ++i) {
-
- const uint8_t * q2 = x[i].qs;
- const int8_t * q8 = y[i].qs;
- const uint8_t * sc = x[i].scales;
-
- int summs = 0;
- for (int j = 0; j < 16; ++j) {
- summs += y[i].bsums[j] * (sc[j] >> 4);
- }
-
- const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
- const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
-
- int isum = 0;
- int is = 0;
- int d;
- for (int k = 0; k < QK_K/128; ++k) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- d = sc[is++] & 0xF;
- int isuml = 0;
- for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- d = sc[is++] & 0xF;
- isuml = 0;
- for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- shift += 2;
- q8 += 32;
- }
- q2 += 32;
- }
- sumf += dall * isum - dmin * summs;
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
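The q2_K scalar loop being removed here relies on a small identity: with each weight reconstructed as w = dall*sc_b*q - dmin*m_b per 16-element sub-block, the dot product against the q8 activations splits into a quant part and a mins part, and the mins part only needs the per-sub-block activation sums (the role of y[i].bsums). A tiny self-contained check of that identity with made-up numbers (sizes and values are illustrative only):

    #include <assert.h>

    #define NSUB   2   /* illustrative sub-block count  */
    #define SUBLEN 4   /* illustrative sub-block length */

    int main(void) {
        const int   q [NSUB][SUBLEN] = {{1, 2, 3, 0}, {2, 2, 1, 3}};   /* 2-bit quants     */
        const int   a [NSUB][SUBLEN] = {{5,-3, 7, 1}, {-2, 4, 0, 6}};  /* q8 activations   */
        const int   sc[NSUB] = {3, 1};                                 /* 4-bit sub-scales */
        const int   mn[NSUB] = {2, 5};                                 /* 4-bit sub-mins   */
        const float dall = 0.5f, dmin = 0.25f;

        /* direct: reconstruct every weight, then take the dot product */
        float direct = 0.0f;
        for (int b = 0; b < NSUB; ++b)
            for (int j = 0; j < SUBLEN; ++j)
                direct += (dall*sc[b]*q[b][j] - dmin*mn[b]) * a[b][j];

        /* factored: what the scalar loop (and the generic fallback) compute,
           using only per-sub-block activation sums in place of y[i].bsums   */
        int isum = 0, summs = 0;
        for (int b = 0; b < NSUB; ++b) {
            int isuml = 0, bsum = 0;
            for (int j = 0; j < SUBLEN; ++j) { isuml += q[b][j]*a[b][j]; bsum += a[b][j]; }
            isum  += sc[b]*isuml;
            summs += mn[b]*bsum;
        }
        const float factored = dall*isum - dmin*summs;

        assert(direct == factored);   /* exact here: small integers times powers of two */
        return 0;
    }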
*s = vec_extract(vsumf0, 0);
#else
- // scalar version
- // This function is written like this so the compiler can manage to vectorize most of it
- // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
- // manually vectorized version above. Every other version I tried would run at least 4 times slower.
- // The ideal situation would be if we could just write the code once, and the compiler would
- // automatically produce the best possible set of machine instructions, instead of us having to manually
- // write vectorized versions for AVX, ARM_NEON, etc.
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- uint32_t auxs[4];
- const int8_t * scales = (const int8_t*)auxs;
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q3 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].hmask;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- q3 += 32;
- }
- a = aux8;
-
- memcpy(auxs, x[i].scales, 12);
- uint32_t tmp = auxs[2];
- auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
- auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
- auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
- auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
-
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
-
}
void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = vec_extract(vsumf0, 0);
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- a += 32;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- a += 32; q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = vec_extract(vsumf0, 0);
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = vec_extract(vsumf0, 0);
#else
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].ql;
- const uint8_t * GGML_RESTRICT qh = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- }
- a += 128;
- q4 += 64;
- qh += 32;
- }
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- int scale = x[i].scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
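The q6_K scalar loop removed above rebuilds each 6-bit weight from a 4-bit low part in ql and a 2-bit high part in qh, then recentres by 32. A one-function sketch of that reconstruction (q6_K_value is a hypothetical helper):

    #include <stdint.h>

    static int8_t q6_K_value(uint8_t ql_nibble, uint8_t qh_pair) {
        /* 4 low bits | 2 high bits, then shift the range from [0, 63] to [-32, 31] */
        return (int8_t)(((ql_nibble & 0xF) | ((qh_pair & 3) << 4)) - 32);
    }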
*s = 0.125f * vec_extract(vsumf0, 0);
#else
-
- uint32_t aux32[2];
- const uint8_t * aux8 = (const uint8_t *)aux32;
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * GGML_RESTRICT q2 = x[i].qs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(aux32, q2, 2*sizeof(uint32_t));
- q2 += 4;
- const uint32_t ls = 2*(aux32[1] >> 28) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
- const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls;
- }
- sumf += d * bsum;
- }
- *s = 0.125f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
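The iq2_xxs/iq2_xs/iq2_s scalar loops all share one step: each 8-value grid row is dotted against the activations with a per-lane sign flip taken from a packed sign byte. A sketch of that step; example_kmask is assumed to mirror kmask_iq2xs (one bit per lane):

    #include <stdint.h>

    static const uint8_t example_kmask[8] = {1, 2, 4, 8, 16, 32, 64, 128};

    static int signed_row_dot(const uint8_t * grid, const int8_t * q8, uint8_t signs) {
        int sum = 0;
        for (int j = 0; j < 8; ++j) {
            sum += grid[j] * q8[j] * ((signs & example_kmask[j]) ? -1 : 1);
        }
        return sum;
    }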
*s = 0.125f * vec_extract(vsumf0, 0);
#else
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * GGML_RESTRICT q2 = x[i].qs;
- const uint8_t * GGML_RESTRICT sc = x[i].scales;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
- const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 2; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls1;
- sumi = 0;
- for (int l = 2; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls2;
- q2 += 4;
- }
- sumf += d * bsum;
- }
- *s = 0.125f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = 0.125f * vec_extract(vsumf0, 0);
#else
-
- float sumf = 0;
- for (int i = 0; i < nb; i++) {
-
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint8_t * qh = x[i].qh;
- const uint8_t * signs = qs + QK_K/8;
-
- int bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf);
- int ls2 = 1 + 2*(x[i].scales[ib32] >> 4);
- int sumi1 = 0, sumi2 = 0;
- for (int l = 0; l < 2; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
- for (int j = 0; j < 8; ++j) {
- sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- for (int l = 2; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
- for (int j = 0; j < 8; ++j) {
- sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += ls1 * sumi1 + ls2 * sumi2;
- qs += 4;
- signs += 4;
- }
-
- sumf += d * bsum;
- }
-
- *s = 0.125f * sumf;
-
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
-
}
void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = 0.25f * vec_extract(vsumf0, 0);
#else
-
- uint32_t aux32;
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * GGML_RESTRICT q3 = x[i].qs;
- const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
- const uint32_t ls = 2*(aux32 >> 28) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
- const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
- const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- q3 += 8;
- bsum += sumi * ls;
- }
- sumf += d * bsum;
- }
- *s = 0.25f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq3_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = vec_extract(vsumf0, 0);
#else
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * GGML_RESTRICT qs = x[i].qs;
- const uint8_t * GGML_RESTRICT qh = x[i].qh;
- const uint8_t * GGML_RESTRICT signs = x[i].signs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
- const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1;
- const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256)));
- const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256)));
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- qs += 8;
- signs += 4;
- bsum += sumi * ls1;
- sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256)));
- const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256)));
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- qs += 8;
- signs += 4;
- bsum += sumi * ls2;
- }
- sumf += d * bsum;
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq3_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = vec_extract(vsumf0, 0);
#else
-
- float sumf = 0;
- for (int i = 0; i < nb; i++) {
-
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint16_t * qh = x[i].qh;
-
- int sumi = 0, sumi1 = 0;
- for (int ib = 0; ib < QK_K/32; ++ib) {
- const int ls = 2*((qh[ib] >> 12) & 7) + 1;
- const int delta = qh[ib] & 0x8000 ? -1 : 1;
- int lsum = 0;
- for (int l = 0; l < 4; ++l) {
- const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
- for (int j = 0; j < 8; ++j) {
- lsum += q8[j] * grid[j];
- }
- q8 += 8;
- }
- sumi += ls * lsum;
- sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]);
- qs += 4;
- }
-
- sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1);
- }
-
- *s = sumf;
-
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq1_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
sumf = vec_extract(vsumf0, 0);
-#endif
- for (; ib < nb; ++ib) {
- const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d);
- int sumi1 = 0, sumi2 = 0;
- for (int j = 0; j < QK4_NL/2; ++j) {
- sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf];
- sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4];
- }
- sumf += d * (sumi1 + sumi2);
- }
*s = sumf;
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(ib);
+ UNUSED(sumf);
+ ggml_vec_dot_iq4_nl_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = vec_extract(vsumf0, 0);
#else
- float sumf = 0;
- for (int ibl = 0; ibl < nb; ++ibl) {
- const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
- uint16_t h = x[ibl].scales_h;
- const uint8_t * qs = x[ibl].qs;
- const int8_t * q8 = y[ibl].qs;
- for (int ib = 0; ib < QK_K/32; ib += 2) {
- const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
- const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30);
- h >>= 4;
- const float d1 = d4d8*(ls1 - 32);
- const float d2 = d4d8*(ls2 - 32);
- int sumi1 = 0, sumi2 = 0;
- for (int j = 0; j < 16; ++j) {
- sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
- sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
- }
- sumf += d1 * (sumi1 + sumi2);
- qs += 16;
- q8 += 32;
- sumi1 = sumi2 = 0;
- for (int j = 0; j < 16; ++j) {
- sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
- sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
- }
- sumf += d2 * (sumi1 + sumi2);
- qs += 16;
- q8 += 32;
- }
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
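Unlike q4_0, the iq4_nl/iq4_xs scalar loops do not decode a nibble linearly: each 4-bit code indexes a small table of non-uniform levels (kvalues_iq4nl in ggml). A sketch of the inner 16-pair step; the table values below are placeholders in the spirit of that codebook, not guaranteed to match it:

    #include <stdint.h>

    static const int8_t example_kvalues[16] = {
        -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113
    };

    static int iq4_dot_16_pairs(const uint8_t * qs, const int8_t * q8) {
        int sumi1 = 0, sumi2 = 0;
        for (int j = 0; j < 16; ++j) {
            sumi1 += q8[j +  0] * example_kvalues[qs[j] & 0xf];   /* low nibbles  */
            sumi2 += q8[j + 16] * example_kvalues[qs[j] >> 4];    /* high nibbles */
        }
        return sumi1 + sumi2;   /* the caller applies the (ls - 32) sub-block scale */
    }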
//===================================== Dot products =================================
void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+#if defined(__riscv_v)
const int qk = QK8_0;
const int nb = n / qk;
int ib = 0;
float sumf = 0;
-#if defined(__riscv_v)
size_t vl = qk / 2;
for (; ib < nb; ++ib) {
sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d);
}
-#endif
- for (; ib < nb; ++ib) {
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[ib].qs[j] & 0x0F) - 8;
- const int v1 = (x[ib].qs[j] >> 4) - 8;
-
- sumi0 += (v0 * y[ib].qs[j]);
- sumi1 += (v1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d);
- }
-
*s = sumf;
+#else
+ ggml_vec_dot_q4_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
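For reference, this is the arithmetic the removed scalar tail performed per block, and which ggml_vec_dot_q4_0_q8_0_generic is expected to keep performing: two 4-bit values per byte, an implicit offset of 8, and one scale per block. dot_one_block_q4_0_q8_0 and QK4_0 = 32 are assumptions used only for this sketch:

    #include <stdint.h>

    #define QK4_0 32   /* assumed q4_0/q8_0 block size */

    static float dot_one_block_q4_0_q8_0(const uint8_t * qs4, float d4,
                                         const int8_t * qs8, float d8) {
        int sumi0 = 0;
        int sumi1 = 0;
        for (int j = 0; j < QK4_0/2; ++j) {
            const int v0 = (qs4[j] & 0x0F) - 8;   /* low nibble  -> [-8, 7] */
            const int v1 = (qs4[j] >> 4) - 8;     /* high nibble -> [-8, 7] */
            sumi0 += v0 * qs8[j];
            sumi1 += v1 * qs8[j + QK4_0/2];
        }
        return (sumi0 + sumi1) * d4 * d8;         /* both block scales applied once */
    }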
void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+#if defined(__riscv_v)
const int qk = QK8_1;
const int nb = n / qk;
int ib = 0;
float sumf = 0;
-#if defined(__riscv_v)
size_t vl = qk / 2;
for (; ib < nb; ++ib) {
sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
}
-#endif
- for (; ib < nb; ++ib) {
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[ib].qs[j] & 0x0F);
- const int v1 = (x[ib].qs[j] >> 4);
-
- sumi0 += (v0 * y[ib].qs[j]);
- sumi1 += (v1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
- }
-
*s = sumf;
+#else
+ ggml_vec_dot_q4_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+#if defined(__riscv_v)
const int qk = QK8_0;
const int nb = n / qk;
const block_q5_0 * GGML_RESTRICT x = vx;
const block_q8_0 * GGML_RESTRICT y = vy;
-#if defined(__riscv_v)
size_t vl;
size_t vlenb = __riscv_vlenb();
sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d) * GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi;
}
-#endif
- for (; ib < nb; ++ib) {
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
-
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
- const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
-
- const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16);
- const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16);
-
- sumi0 += (x0 * y[ib].qs[j]);
- sumi1 += (x1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi;
- }
-
*s = sumf;
+#else
+ ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+#if defined(__riscv_v)
const int qk = QK8_1;
const int nb = n / qk;
const block_q5_1 * GGML_RESTRICT x = vx;
const block_q8_1 * GGML_RESTRICT y = vy;
-#if defined(__riscv_v)
size_t vl;
size_t vlenb = __riscv_vlenb();
sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
}
-#endif
- for (; ib < nb; ++ib) {
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
-
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
-
- const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0;
- const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1;
-
- sumi0 += (x0 * y[ib].qs[j]);
- sumi1 += (x1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
- }
-
*s = sumf;
+#else
+ ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d));
}
-#endif
- for (; ib < nb; ++ib) {
- int sumi = 0;
- for (int j = 0; j < qk; j++) {
- sumi += x[ib].qs[j]*y[ib].qs[j];
- }
- sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d));
- }
-
- *s = sumf;
+ *s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(ib);
+ UNUSED(sumf);
+ ggml_vec_dot_q8_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
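The scalar loops above only touch x[ib].d, x[ib].qs and y[ib].qs (plus qh/m/s for the q5 and q8_1 variants), which implies the usual ggml block layouts. The structs below are illustrative approximations only (example_ prefixes, fp16 shown as raw uint16_t); the authoritative definitions live in ggml's headers:

    #include <stdint.h>

    typedef uint16_t example_ggml_half;      /* fp16 scale stored as raw bits */

    #define EXAMPLE_QK 32                    /* assumed block size */

    typedef struct {
        example_ggml_half d;                 /* per-block scale                      */
        uint8_t           qs[EXAMPLE_QK/2];  /* 32 4-bit values, two packed per byte */
    } example_block_q4_0;

    typedef struct {
        example_ggml_half d;                 /* per-block scale        */
        int8_t            qs[EXAMPLE_QK];    /* 32 signed 8-bit values */
    } example_block_q8_0;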
void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
#else
- float sumf = 0;
-
- for (int i = 0; i < nb; ++i) {
-
- const uint8_t * q2 = x[i].qs;
- const int8_t * q8 = y[i].qs;
- const uint8_t * sc = x[i].scales;
-
- int summs = 0;
- for (int j = 0; j < 16; ++j) {
- summs += y[i].bsums[j] * (sc[j] >> 4);
- }
-
- const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
- const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
- int isum = 0;
- int is = 0;
- int d;
- for (int k = 0; k < QK_K/128; ++k) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- d = sc[is++] & 0xF;
- int isuml = 0;
- for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- d = sc[is++] & 0xF;
- isuml = 0;
- for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- shift += 2;
- q8 += 32;
- }
- q2 += 32;
- }
- sumf += dall * isum - dmin * summs;
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sumf;
#else
- // scalar version
- // This function is written like this so the compiler can manage to vectorize most of it
- // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
- // manually vectorized version above. Every other version I tried would run at least 4 times slower.
- // The ideal situation would be if we could just write the code once, and the compiler would
- // automatically produce the best possible set of machine instructions, instead of us having to manually
- // write vectorized versions for AVX, ARM_NEON, etc.
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- uint32_t auxs[4];
- const int8_t * scales = (const int8_t*)auxs;
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q3 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].hmask;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- q3 += 32;
- }
- a = aux8;
-
- memcpy(auxs, x[i].scales, 12);
- uint32_t tmp = auxs[2];
- auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
- auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
- auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
- auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
#else
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- a += 32;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- a += 32; q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(nb);
+ UNUSED(utmp);
+ ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
#else
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(nb);
+ UNUSED(utmp);
+ ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
#else
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].ql;
- const uint8_t * GGML_RESTRICT qh = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- }
- a += 128;
- q4 += 64;
- qh += 32;
- }
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- int scale = x[i].scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
}
#endif
- {
- float sumf[8];
- int sumi;
-
- const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb);
-
- for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0;
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4;
- }
- sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d);
- }
- }
- }
- for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j];
- }
- }
+ ggml_gemv_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
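The interleaved gemv/gemm scalar loops removed here (and kept in the *_generic versions) avoid an explicit sign-extension of the packed 4-bit weights: shifting the low nibble to the top of an int8_t, or masking the high nibble in place, yields 16 times the signed value, and the final >> 4 on the accumulated pair removes that factor. A small self-checking sketch of the trick (dot_pair_from_packed is a hypothetical helper; the shift of a negative sum is assumed to behave as an arithmetic shift, as the original code also relies on):

    #include <stdint.h>
    #include <assert.h>

    static int dot_pair_from_packed(uint8_t packed, int8_t a_lo, int8_t a_hi) {
        const int v0 = (int8_t)(packed << 4);    /* 16 * signed low nibble  */
        const int v1 = (int8_t)(packed & 0xF0);  /* 16 * signed high nibble */
        return (v0 * a_lo + v1 * a_hi) >> 4;     /* drop the common factor of 16 */
    }

    int main(void) {
        /* 0x2E packs low nibble 0xE (-2 as a signed nibble) and high nibble 0x2 (+2) */
        assert(dot_pair_from_packed(0x2E, 3, 5) == -2*3 + 2*5);
        return 0;
    }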
void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
return;
}
-#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
- float sumf[4][8];
- int sumi;
-
- for (int y = 0; y < nr / 4; y++) {
- const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb);
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0;
- }
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) +
- (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4;
- }
- sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]);
- }
- }
- }
- }
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++)
- s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j];
- }
- }
- }
+#endif
+ ggml_gemm_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
sumf = acc[0] + acc[1] + acc[2] + acc[3];
-#endif
- for (; ib < nb; ++ib) {
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[ib].qs[j] & 0x0F) - 8;
- const int v1 = (x[ib].qs[j] >> 4) - 8;
-
- sumi0 += (v0 * y[ib].qs[j]);
- sumi1 += (v1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d);
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(ib);
+ UNUSED(sumf);
+ ggml_vec_dot_q4_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf = acc[0] + acc[1] + acc[2] + acc[3] + summs;
-#endif
- for (; ib < nb; ++ib) {
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[ib].qs[j] & 0x0F);
- const int v1 = (x[ib].qs[j] >> 4);
-
- sumi0 += (v0 * y[ib].qs[j]);
- sumi1 += (v1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(ib);
+ UNUSED(sumf);
+ ggml_vec_dot_q4_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf = acc[0] + acc[1] + acc[2] + acc[3];
-#endif
- for (; ib < nb; ++ib) {
- int sumi = 0;
-
- for (int j = 0; j < qk; j++) {
- sumi += x[ib].qs[j]*y[ib].qs[j];
- }
-
- sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d));
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(ib);
+ UNUSED(sumf);
+ ggml_vec_dot_q8_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = sum;
#else
- // scalar version
- // This function is written like this so the compiler can manage to vectorize most of it
- // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
- // manually vectorized version above. Every other version I tried would run at least 4 times slower.
- // The ideal situation would be if we could just write the code once, and the compiler would
- // automatically produce the best possible set of machine instructions, instead of us having to manually
- // write vectorized versions for AVX, ARM_NEON, etc.
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- uint32_t auxs[4];
- const int8_t * scales = (const int8_t*)auxs;
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q3 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].hmask;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- q3 += 32;
- }
- a = aux8;
-
- memcpy(auxs, x[i].scales, 12);
- uint32_t tmp = auxs[2];
- auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
- auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
- auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
- auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
-
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
-
}
void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = sumf;
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- a += 32;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- a += 32; q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sumf;
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sum;
#else
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].ql;
- const uint8_t * GGML_RESTRICT qh = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- }
- a += 128;
- q4 += 64;
- qh += 32;
- }
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- int scale = x[i].scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
sumf += GGML_CPU_FP16_TO_FP32(x0->d) * GGML_CPU_FP16_TO_FP32(y0->d) * (v_xy[0] + v_xy[1] + v_xy[2] + v_xy[3]);
}
-#endif
- for (; ib < nb; ++ib) {
- const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d);
- int sumi1 = 0, sumi2 = 0;
- for (int j = 0; j < QK4_NL/2; ++j) {
- sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf];
- sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4];
- }
- sumf += d * (sumi1 + sumi2);
- }
*s = sumf;
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(ib);
+ UNUSED(sumf);
+ ggml_vec_dot_iq4_nl_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = sumf;
#else
- float sumf = 0;
- for (int ibl = 0; ibl < nb; ++ibl) {
- const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
- uint16_t h = x[ibl].scales_h;
- const uint8_t * qs = x[ibl].qs;
- const int8_t * q8 = y[ibl].qs;
- for (int ib = 0; ib < QK_K/32; ib += 2) {
- const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
- const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30);
- h >>= 4;
- const float d1 = d4d8*(ls1 - 32);
- const float d2 = d4d8*(ls2 - 32);
- int sumi1 = 0, sumi2 = 0;
- for (int j = 0; j < 16; ++j) {
- sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
- sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
- }
- sumf += d1 * (sumi1 + sumi2);
- qs += 16;
- q8 += 32;
- sumi1 = sumi2 = 0;
- for (int j = 0; j < 16; ++j) {
- sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
- sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
- }
- sumf += d2 * (sumi1 + sumi2);
- qs += 16;
- q8 += 32;
- }
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
-#endif
- for (; ib < nb; ++ib) {
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
-
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
- const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
-
- const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16);
- const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16);
-
- sumi0 += (x0 * y[ib].qs[j]);
- sumi1 += (x1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi;
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(ib);
+ UNUSED(sumf);
+ UNUSED(x);
+ UNUSED(y);
+ ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
-#endif
- for (; ib < nb; ++ib) {
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
-
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
-
- const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0;
- const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1;
-
- sumi0 += (x0 * y[ib].qs[j]);
- sumi1 += (x1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(ib);
+ UNUSED(sumf);
+ UNUSED(x);
+ UNUSED(y);
+ ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
sumf = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
-#endif
- for (; ib < nb; ++ib) {
- int sumi = 0;
-
- for (int j = 0; j < qk; j++) {
- sumi += x[ib].qs[j]*y[ib].qs[j];
- }
-
- sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d));
- }
-
*s = sumf;
+#else
+ UNUSED(nb);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(ib);
+ UNUSED(sumf);
+ ggml_vec_dot_q8_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
}
void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = sumf;
#else
-
- float sumf = 0;
-
- for (int i = 0; i < nb; ++i) {
-
- const uint8_t * q2 = x[i].qs;
- const int8_t * q8 = y[i].qs;
- const uint8_t * sc = x[i].scales;
-
- int summs = 0;
- for (int j = 0; j < 16; ++j) {
- summs += y[i].bsums[j] * (sc[j] >> 4);
- }
-
- const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
- const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
-
- int isum = 0;
- int is = 0;
- int d;
- for (int k = 0; k < QK_K/128; ++k) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- d = sc[is++] & 0xF;
- int isuml = 0;
- for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- d = sc[is++] & 0xF;
- isuml = 0;
- for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- shift += 2;
- q8 += 32;
- }
- q2 += 32;
- }
- sumf += dall * isum - dmin * summs;
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sumf;
#else
- // scalar version
- // This function is written like this so the compiler can manage to vectorize most of it
- // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
- // manually vectorized version above. Every other version I tried would run at least 4 times slower.
- // The ideal situation would be if we could just write the code once, and the compiler would
- // automatically produce the best possible set of machine instructions, instead of us having to manually
- // write vectorized versions for AVX, ARM_NEON, etc.
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- uint32_t auxs[4];
- const int8_t * scales = (const int8_t*)auxs;
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q3 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].hmask;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- q3 += 32;
- }
- a = aux8;
-
- memcpy(auxs, x[i].scales, 12);
- uint32_t tmp = auxs[2];
- auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
- auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
- auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
- auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
-
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
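// The deleted comment above notes that the scalar K-quant kernels are written with
// fixed 8-wide inner loops over small temporaries (aux16/aux32/sums) precisely so
// GCC/clang can auto-vectorize them. A stripped-down sketch of that loop shape,
// assuming plain int8 inputs rather than a real quant block (dot8_autovec is a
// hypothetical name; n must be a multiple of 8):
#include <stdint.h>
#include <string.h>

float dot8_autovec(const int8_t * a, const int8_t * b, int n, float scale) {
    float   sums[8];
    int16_t aux16[8];
    int32_t aux32[8];
    memset(sums, 0, sizeof(sums));
    for (int i = 0; i < n; i += 8) {
        memset(aux32, 0, sizeof(aux32));
        for (int l = 0; l < 8; ++l) aux16[l]  = a[i + l] * b[i + l];  // widen to 16 bit
        for (int l = 0; l < 8; ++l) aux32[l] += aux16[l];             // accumulate in 32 bit
        for (int l = 0; l < 8; ++l) sums[l]  += scale * aux32[l];     // apply the block scale
    }
    float sumf = 0.0f;
    for (int l = 0; l < 8; ++l) sumf += sums[l];                      // single horizontal sum at the end
    return sumf;
}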
*s = sumf;
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- a += 32;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- a += 32; q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sumf;
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = sumf;
#else
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].ql;
- const uint8_t * GGML_RESTRICT qh = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- }
- a += 128;
- q4 += 64;
- qh += 32;
- }
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- int scale = x[i].scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
const block_q8_1 * GGML_RESTRICT y = vy;
int ib = 0;
- float sumf = 0;
#if defined(__AVX2__) || defined(__AVX__)
// Initialize accumulator with zeros
#endif
}
- sumf = hsum_float_8(acc) + summs;
-
+ *s = hsum_float_8(acc) + summs;
+#else
+ UNUSED(nb);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(ib);
+ ggml_vec_dot_q4_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
- for (; ib < nb; ++ib) {
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[ib].qs[j] & 0x0F);
- const int v1 = (x[ib].qs[j] >> 4);
-
- sumi0 += (v0 * y[ib].qs[j]);
- sumi1 += (v1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
- }
-
- *s = sumf;
}
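// Why the deleted q4_1 scalar tail adds "x.m * y.s" on top of the integer dot
// product: q4_1 dequantizes as x = d_x*q_x + m_x and q8_1 as y = d_y*q_y, and
// (assuming y.s caches d_y * sum(q_y), as the q8_1 block layout suggests)
//
//   sum_j x_j*y_j = d_x*d_y * sum_j q_xj*q_yj  +  m_x * d_y * sum_j q_yj
//                 = d_x*d_y * sumi             +  m_x * y.s
//
// so the per-block minimum only needs the precomputed sum of the q8 quants,
// not a second pass over them.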
void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
const int nb = n / qk;
int ib = 0;
- float sumf = 0;
assert(n % qk == 0);
assert(qk == QK5_0);
acc = _mm256_fmadd_ps(d, q, acc);
}
- sumf = hsum_float_8(acc);
+ *s = hsum_float_8(acc);
#elif defined(__AVX__)
// Initialize accumulator with zeros
__m256 acc = _mm256_setzero_ps();
acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
}
- sumf = hsum_float_8(acc);
-
+ *s = hsum_float_8(acc);
+#else
+ UNUSED(nb);
+ UNUSED(ib);
+ UNUSED(x);
+ UNUSED(y);
+ ggml_vec_dot_q5_0_q8_0_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
- for (; ib < nb; ++ib) {
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
-
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
- const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
-
- const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16);
- const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16);
-
- sumi0 += (x0 * y[ib].qs[j]);
- sumi1 += (x1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi;
- }
-
- *s = sumf;
}
void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
const int nb = n / qk;
int ib = 0;
- float sumf = 0;
assert(n % qk == 0);
assert(qk == QK5_1);
acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
}
- sumf = hsum_float_8(acc) + summs;
+ *s = hsum_float_8(acc) + summs;
#elif defined(__AVX__)
// Initialize accumulator with zeros
__m256 acc = _mm256_setzero_ps();
acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
}
- sumf = hsum_float_8(acc) + summs;
-
+ *s = hsum_float_8(acc) + summs;
+#else
+ UNUSED(nb);
+ UNUSED(ib);
+ UNUSED(x);
+ UNUSED(y);
+ ggml_vec_dot_q5_1_q8_1_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
- for (; ib < nb; ++ib) {
- uint32_t qh;
- memcpy(&qh, x[ib].qh, sizeof(qh));
-
- int sumi0 = 0;
- int sumi1 = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
-
- const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0;
- const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1;
-
- sumi0 += (x0 * y[ib].qs[j]);
- sumi1 += (x1 * y[ib].qs[j + qk/2]);
- }
-
- int sumi = sumi0 + sumi1;
- sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
- }
-
- *s = sumf;
}
void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
}
sumf = hsum_float_8(accum);
-
#endif
for (; ib < nb; ++ib) {
int sumi = 0;
*s = hsum_float_8(sumf);
#else
- const uint8_t pow3[6] = {1, 3, 9, 27, 81, 243};
-
- float sumf = 0.0f;
-
- for (int i = 0; i < nb; ++i) {
- int sum = 0;
-
- for (size_t j = 0; j < sizeof(x->qs) - sizeof(x->qs) % 32; j += 32) {
- for (size_t l = 0; l < 5; ++l) {
- for (size_t m = 0; m < 32; ++m) {
- uint8_t q = x[i].qs[j + m] * pow3[l];
- uint16_t xi = ((uint16_t) q * 3) >> 8;
- sum += (xi - 1) * y[i].qs[j*5 + l*32 + m];
- }
- }
- }
- for (size_t j = sizeof(x->qs) - sizeof(x->qs) % 32; j < sizeof(x->qs); j += 16) {
- for (size_t l = 0; l < 5; ++l) {
- for (size_t m = 0; m < 16; ++m) {
- uint8_t q = x[i].qs[j + m] * pow3[l];
- uint16_t xi = ((uint16_t) q * 3) >> 8;
- sum += (xi - 1) * y[i].qs[j*5 + l*16 + m];
- }
- }
- }
-
- for (size_t l = 0; l < 4; ++l) {
- for (size_t j = 0; j < sizeof(x->qh); ++j) {
- uint8_t q = x[i].qh[j] * pow3[l];
- uint16_t xi = ((uint16_t) q * 3) >> 8;
- sum += (xi - 1) * y[i].qs[sizeof(x->qs)*5 + l*sizeof(x->qh) + j];
- }
- }
-
- sumf += (float) sum * (GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d);
- }
-
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_tq1_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
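// The TQ1_0 scalar path deleted above extracts packed ternary digits with
// "q * pow3[l]" followed by "((uint16_t) q * 3) >> 8". A self-contained round-trip
// sketch of why that works, assuming the byte was packed as ceil(v*256/243) with
// v the base-3 value of five trits, most significant first (this mirrors the
// reference quantizer; treat it as an illustration rather than the exact code):
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint8_t pow3[5]  = {1, 3, 9, 27, 81};
    const uint8_t trits[5] = {2, 0, 1, 2, 1};                   // arbitrary digits in {0,1,2}

    uint16_t v = 0;
    for (int l = 0; l < 5; ++l) v = (uint16_t)(v*3 + trits[l]); // v in [0, 242]
    const uint8_t packed = (uint8_t)((v*256 + 242) / 243);      // ceil(v*256/243)

    for (int l = 0; l < 5; ++l) {
        const uint8_t q  = (uint8_t)(packed * pow3[l]);         // rotate digit l to the top
        const uint8_t xi = (uint8_t)(((uint16_t)q * 3) >> 8);   // read it back out
        assert(xi == trits[l]);
        printf("digit %d -> %u\n", l, xi);
    }
    return 0;
}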
*s = hsum_float_8(sumf);
#else
- float sumf = 0.0f;
-
- for (int i = 0; i < nb; ++i) {
- int32_t sumi = 0;
-
- for (size_t j = 0; j < sizeof(x->qs); j += 32) {
- for (size_t l = 0; l < 4; ++l) {
- for (size_t k = 0; k < 32; ++k) {
- sumi += y[i].qs[j*4 + l*32 + k] * (((x[i].qs[j + k] >> (l*2)) & 3) - 1);
- }
- }
- }
-
- const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
-
- sumf += (float) sumi * d;
- }
-
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_tq2_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(acc);
#else
-
- float sumf = 0;
-
- for (int i = 0; i < nb; ++i) {
-
- const uint8_t * q2 = x[i].qs;
- const int8_t * q8 = y[i].qs;
- const uint8_t * sc = x[i].scales;
-
- int summs = 0;
- for (int j = 0; j < 16; ++j) {
- summs += y[i].bsums[j] * (sc[j] >> 4);
- }
-
- const float dall = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
- const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
-
- int isum = 0;
- int is = 0;
- int d;
- for (int k = 0; k < QK_K/128; ++k) {
- int shift = 0;
- for (int j = 0; j < 4; ++j) {
- d = sc[is++] & 0xF;
- int isuml = 0;
- for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- d = sc[is++] & 0xF;
- isuml = 0;
- for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
- isum += d * isuml;
- shift += 2;
- q8 += 32;
- }
- q2 += 32;
- }
- sumf += dall * isum - dmin * summs;
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(acc);
#else
- // scalar version
- // This function is written like this so the compiler can manage to vectorize most of it
- // Using -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so from the
- // manually vectorized version above. Every other version I tried would run at least 4 times slower.
- // The ideal situation would be if we could just write the code once, and the compiler would
- // automatically produce the best possible set of machine instructions, instead of us having to manually
- // write vectorized versions for AVX, ARM_NEON, etc.
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- uint32_t auxs[4];
- const int8_t * scales = (const int8_t*)auxs;
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q3 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].hmask;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
- for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
- a += 32; m <<= 1;
- q3 += 32;
- }
- a = aux8;
-
- memcpy(auxs, x[i].scales, 12);
- uint32_t tmp = auxs[2];
- auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
- auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
- auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
- auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
- for (int j = 0; j < QK_K/16; ++j) {
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
-
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
-
}
void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- a += 32;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- a += 32; q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(acc) + summs;
#else
-
- const uint8_t * scales = (const uint8_t*)&utmp[0];
- const uint8_t * mins = (const uint8_t*)&utmp[2];
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].qs;
- const uint8_t * GGML_RESTRICT hm = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- uint8_t m = 1;
- for (int j = 0; j < QK_K/64; ++j) {
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
- for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
- a += 32; m <<= 1;
- q4 += 32;
- }
- memcpy(utmp, x[i].scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-
- int sumi = 0;
- for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/32; ++j) {
- int32_t scale = scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- const float dmin = GGML_CPU_FP16_TO_FP32(x[i].dmin) * y[i].d;
- sumf -= dmin * sumi;
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(acc);
#else
-
- int8_t aux8[QK_K];
- int16_t aux16[8];
- float sums [8];
- int32_t aux32[8];
- memset(sums, 0, 8*sizeof(float));
-
- float sumf = 0;
- for (int i = 0; i < nb; ++i) {
- const uint8_t * GGML_RESTRICT q4 = x[i].ql;
- const uint8_t * GGML_RESTRICT qh = x[i].qh;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- memset(aux32, 0, 8*sizeof(int32_t));
- int8_t * GGML_RESTRICT a = aux8;
- for (int j = 0; j < QK_K; j += 128) {
- for (int l = 0; l < 32; ++l) {
- a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
- a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
- a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
- a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
- }
- a += 128;
- q4 += 64;
- qh += 32;
- }
- a = aux8;
- int is = 0;
- for (int j = 0; j < QK_K/16; ++j) {
- int scale = x[i].scales[is++];
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
- for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
- q8 += 8; a += 8;
- }
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
- }
- for (int l = 0; l < 8; ++l) sumf += sums[l];
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = 0.125f * hsum_float_8(accumf);
#else
-
- uint32_t aux32[2];
- const uint8_t * aux8 = (const uint8_t *)aux32;
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * GGML_RESTRICT q2 = x[i].qs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(aux32, q2, 2*sizeof(uint32_t));
- q2 += 4;
- const uint32_t ls = 2*(aux32[1] >> 28) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
- const uint8_t signs = ksigns_iq2xs[(aux32[1] >> 7*l) & 127];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls;
- }
- sumf += d * bsum;
- }
- *s = 0.125f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = 0.125f * hsum_float_8(accumf);
#else
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint16_t * GGML_RESTRICT q2 = x[i].qs;
- const uint8_t * GGML_RESTRICT sc = x[i].scales;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- const uint16_t ls1 = 2*(sc[ib32] & 0xf) + 1;
- const uint16_t ls2 = 2*(sc[ib32] >> 4) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 2; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls1;
- sumi = 0;
- for (int l = 2; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[l] & 511));
- const uint8_t signs = ksigns_iq2xs[q2[l] >> 9];
- for (int j = 0; j < 8; ++j) {
- sumi += grid[j] * q8[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += sumi * ls2;
- q2 += 4;
- }
- sumf += d * bsum;
- }
- *s = 0.125f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = 0.125f * hsum_float_8(accumf);
#else
-
- float sumf = 0;
- for (int i = 0; i < nb; i++) {
-
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint8_t * qh = x[i].qh;
- const uint8_t * signs = qs + QK_K/8;
-
- int bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- int ls1 = 1 + 2*(x[i].scales[ib32] & 0xf);
- int ls2 = 1 + 2*(x[i].scales[ib32] >> 4);
- int sumi1 = 0, sumi2 = 0;
- for (int l = 0; l < 2; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
- for (int j = 0; j < 8; ++j) {
- sumi1 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- for (int l = 2; l < 4; ++l) {
- const uint8_t * grid = (const uint8_t *)(iq2s_grid + (qs[l] | (qh[ib32] << (8-2*l) & 0x300)));
- for (int j = 0; j < 8; ++j) {
- sumi2 += q8[j] * grid[j] * (signs[l] & kmask_iq2xs[j] ? -1 : 1);
- }
- q8 += 8;
- }
- bsum += ls1 * sumi1 + ls2 * sumi2;
- qs += 4;
- signs += 4;
- }
-
- sumf += d * bsum;
- }
-
- *s = 0.125f * sumf;
-
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
-
}
void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
*s = 0.25f * hsum_float_8(accumf);
#else
-
- uint32_t aux32;
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * GGML_RESTRICT q3 = x[i].qs;
- const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- memcpy(&aux32, gas, sizeof(uint32_t)); gas += sizeof(uint32_t);
- const uint32_t ls = 2*(aux32 >> 28) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*l+0]);
- const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*l+1]);
- const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*l) & 127];
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- q3 += 8;
- bsum += sumi * ls;
- }
- sumf += d * bsum;
- }
- *s = 0.25f * sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq3_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(accumf);
#else
-
- float sumf = 0.f;
- for (int i = 0; i < nb; ++i) {
- const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
- const uint8_t * GGML_RESTRICT qs = x[i].qs;
- const uint8_t * GGML_RESTRICT qh = x[i].qh;
- const uint8_t * GGML_RESTRICT signs = x[i].signs;
- const int8_t * GGML_RESTRICT q8 = y[i].qs;
- int32_t bsum = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
- const uint32_t ls1 = 2*(x[i].scales[ib32/2] & 0xf) + 1;
- const uint32_t ls2 = 2*(x[i].scales[ib32/2] >> 4) + 1;
- int32_t sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+0] << (8-2*l)) & 256)));
- const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+0] << (7-2*l)) & 256)));
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- qs += 8;
- signs += 4;
- bsum += sumi * ls1;
- sumi = 0;
- for (int l = 0; l < 4; ++l) {
- const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*l+0] | ((qh[ib32+1] << (8-2*l)) & 256)));
- const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*l+1] | ((qh[ib32+1] << (7-2*l)) & 256)));
- for (int j = 0; j < 4; ++j) {
- sumi += grid1[j] * q8[j+0] * (signs[l] & kmask_iq2xs[j+0] ? -1 : 1);
- sumi += grid2[j] * q8[j+4] * (signs[l] & kmask_iq2xs[j+4] ? -1 : 1);
- }
- q8 += 8;
- }
- qs += 8;
- signs += 4;
- bsum += sumi * ls2;
- }
- sumf += d * bsum;
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq3_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(accum) + IQ1S_DELTA * accum1;
#else
-
- float sumf = 0;
- for (int i = 0; i < nb; i++) {
-
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint16_t * qh = x[i].qh;
-
- int sumi = 0, sumi1 = 0;
- for (int ib = 0; ib < QK_K/32; ++ib) {
- const int ls = 2*((qh[ib] >> 12) & 7) + 1;
- const int delta = qh[ib] & 0x8000 ? -1 : 1;
- int lsum = 0;
- for (int l = 0; l < 4; ++l) {
- const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((qh[ib] >> 3*l) & 7) << 8)));
- for (int j = 0; j < 8; ++j) {
- lsum += q8[j] * grid[j];
- }
- q8 += 8;
- }
- sumi += ls * lsum;
- sumi1 += ls * delta * (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]);
- qs += 4;
- }
-
- sumf += GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d * (sumi + IQ1S_DELTA * sumi1);
- }
-
- *s = sumf;
-
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq1_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(accum1) + IQ1M_DELTA * hsum_float_8(accum2);
#else
-
- int sum1[2], sum2[2], delta[4];
-
- float sumf = 0;
- for (int i = 0; i < nb; i++) {
-
- const int8_t * q8 = y[i].qs;
- const uint8_t * qs = x[i].qs;
- const uint8_t * qh = x[i].qh;
- const uint16_t * sc = (const uint16_t *)x[i].scales;
-
- scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
-
- int sumi1 = 0, sumi2 = 0;
- for (int ib = 0; ib < QK_K/32; ++ib) {
- delta[0] = qh[0] & 0x08 ? -1 : 1;
- delta[1] = qh[0] & 0x80 ? -1 : 1;
- delta[2] = qh[1] & 0x08 ? -1 : 1;
- delta[3] = qh[1] & 0x80 ? -1 : 1;
- sum1[0] = sum1[1] = sum2[0] = sum2[1] = 0;
- for (int l = 0; l < 4; ++l) {
- const int8_t * grid = (const int8_t *)(iq1s_grid + (qs[l] | (((uint16_t)qh[l/2] << (8 - 4*(l%2))) & 0x700)));
- int lsum1 = 0, lsum2 = 0;
- for (int j = 0; j < 8; ++j) {
- lsum1 += q8[j] * grid[j];
- lsum2 += q8[j];
- }
- q8 += 8;
- sum1[l/2] += lsum1;
- sum2[l/2] += lsum2*delta[l];
- }
-
- const int ls1 = 2*((sc[ib/2] >> (6*(ib%2)+0)) & 0x7) + 1;
- const int ls2 = 2*((sc[ib/2] >> (6*(ib%2)+3)) & 0x7) + 1;
-
- sumi1 += sum1[0] * ls1 + sum1[1] * ls2;
- sumi2 += sum2[0] * ls1 + sum2[1] * ls2;
- qs += 4;
- qh += 2;
- }
-
- sumf += GGML_CPU_FP16_TO_FP32(scale.f16) * y[i].d * (sumi1 + IQ1M_DELTA * sumi2);
- }
-
- *s = sumf;
-
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(scale);
+ ggml_vec_dot_iq1_m_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
*s = hsum_float_8(accum);
#else
- float sumf = 0;
- for (int ibl = 0; ibl < nb; ++ibl) {
- const float d4d8 = GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d;
- uint16_t h = x[ibl].scales_h;
- const uint8_t * qs = x[ibl].qs;
- const int8_t * q8 = y[ibl].qs;
- for (int ib = 0; ib < QK_K/32; ib += 2) {
- const uint8_t ls1 = (x[ibl].scales_l[ib/2] & 0xf) | ((h << 4) & 0x30);
- const uint8_t ls2 = (x[ibl].scales_l[ib/2] >> 4) | ((h << 2) & 0x30);
- h >>= 4;
- const float d1 = d4d8*(ls1 - 32);
- const float d2 = d4d8*(ls2 - 32);
- int sumi1 = 0, sumi2 = 0;
- for (int j = 0; j < 16; ++j) {
- sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
- sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
- }
- sumf += d1 * (sumi1 + sumi2);
- qs += 16;
- q8 += 32;
- sumi1 = sumi2 = 0;
- for (int j = 0; j < 16; ++j) {
- sumi1 += q8[j+ 0] * kvalues_iq4nl[qs[j] & 0xf];
- sumi2 += q8[j+16] * kvalues_iq4nl[qs[j] >> 4];
- }
- sumf += d2 * (sumi1 + sumi2);
- qs += 16;
- q8 += 32;
- }
- }
- *s = sumf;
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
#endif
}
}
#else
- // scalar
- const int blck_size_interleave = 8;
- float srcv[4][QK8_0];
- float id[4];
-
- for (int i = 0; i < nb; i++) {
- for (int row_iter = 0; row_iter < 4; row_iter++) {
- float amax = 0.0f; // absolute max
-
- for (int j = 0; j < QK8_0; j++) {
- srcv[row_iter][j] = x[row_iter * k + i * QK8_0 + j];
- amax = MAX(amax, fabsf(srcv[row_iter][j]));
- }
-
- const float d = amax / ((1 << 7) - 1);
- id[row_iter] = d ? 1.0f / d : 0.0f;
-
- y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d);
- }
-
- for (int j = 0; j < QK8_0 * 4; j++) {
- int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave;
- int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave;
- src_offset += (j % blck_size_interleave);
-
- float x0 = srcv[src_id][src_offset] * id[src_id];
- y[i].qs[j] = roundf(x0);
- }
- }
+ UNUSED(nb);
+ UNUSED(y);
+ ggml_quantize_mat_q8_0_4x8_generic(x, vy, k);
#endif
}
}
#else
-
- // scalar
- const int blck_size_interleave = 8;
- float srcv[4][QK_K];
- float iscale[4];
-
- for (int i = 0; i < nb; i++) {
- for (int row_iter = 0; row_iter < 4; row_iter++) {
- float amax = 0.0f; // absolute max
- float max = 0;
-
- for (int j = 0; j < QK_K; j++) {
- srcv[row_iter][j] = x[row_iter * k + i * QK_K + j];
- // Update the maximum value of the corresponding super block
- if(amax < fabsf(srcv[row_iter][j])) {
- amax = fabsf(srcv[row_iter][j]);
- max = srcv[row_iter][j];
- }
- }
-
- iscale[row_iter] = amax ? -127.f/max : 0;
-
- y[i].d[row_iter] = amax ? 1/iscale[row_iter] : 0;
- }
-
- for (int j = 0; j < QK_K / 4; j++) {
- y[i].bsums[j] = 0;
- }
-
- // Quants values are interleaved in sequence of eight bytes from corresponding super blocks
- // Bsums values are interleaved in sequence of four bsums from each super block taken for interleaving
- // i.e first four bsums from the first super block, followed by first four bsums from second super block and so on
- for (int j = 0; j < QK_K * 4; j++) {
- int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave;
- int src_id = (j % (4 * blck_size_interleave)) / blck_size_interleave;
- src_offset += (j % blck_size_interleave);
- int index = (((j & 31) >> 3) << 2) + ((j >> 8) << 4) + ((j >> 6) & 3);
-
- float x0 = srcv[src_id][src_offset] * iscale[src_id];
- y[i].qs[j] = nearest_int(x0);
- y[i].bsums[index] += y[i].qs[j];
- }
- }
+ UNUSED(nb);
+ UNUSED(y);
+ ggml_quantize_mat_q8_K_4x8_generic(x, vy, k);
#endif
}
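// The interleaving comment deleted above describes the layout in words; this tiny
// sketch just evaluates the same index arithmetic as the scalar loop for the first
// 32 destination bytes (blck_size_interleave = 8, four source rows), confirming the
// "eight bytes from row 0, then eight from row 1, ..." pattern:
#include <stdio.h>

int main(void) {
    const int blck_size_interleave = 8;
    for (int j = 0; j < 32; ++j) {
        const int src_offset = (j / (4 * blck_size_interleave)) * blck_size_interleave
                             + (j % blck_size_interleave);
        const int src_id     = (j % (4 * blck_size_interleave)) / blck_size_interleave;
        printf("dst %2d <- row %d, offset %d\n", j, src_id, src_offset);
    }
    return 0;
}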
return;
#endif
- {
- float sumf[8];
- int sumi;
-
- const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb);
-
- for (int j = 0; j < ncols_interleaved; j++) sumf[j] = 0.0;
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * blocklen + i]) + (v1 * a_ptr[l].qs[k * blocklen + i + qk / 2])) >> 4;
- }
- sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d);
- }
- }
- }
- for (int j = 0; j < ncols_interleaved; j++) s[x * ncols_interleaved + j] = sumf[j];
- }
- }
+ ggml_gemv_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
void ggml_gemv_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
}
#else
-
- float sumf[8];
- float sum_minf[8];
- uint32_t utmp[32];
- int sumi1;
- int sumi2;
- int sumi;
-
- const block_q8_K * a_ptr = (const block_q8_K *) vy;
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb);
-
- for (int j = 0; j < ncols_interleaved; j++) {
- sumf[j] = 0.0;
- sum_minf[j] = 0.0;
- }
- for (int l = 0; l < nb; l++) {
- for (int sb = 0; sb < 8; sb++) {
- memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12);
- utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4);
- const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1;
- utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4);
- utmp[sb * 4 + 2] = uaux_0;
- utmp[sb * 4 + 0] &= kmask1;
- }
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32;
- uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16;
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi1 = 0;
- sumi2 = 0;
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4);
- sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i]);
- sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 64 + (k % 4) * blocklen + i + 32]);
- sumi1 = sumi1 * scales_0[j];
- sumi2 = sumi2 * scales_1[j];
- sumi += sumi1 + sumi2;
- }
- sumf[j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d;
- }
- }
- for (int sb = 0; sb < 8; sb++) {
- uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16;
- for (int j = 0; j < ncols_interleaved; j++) {
- sum_minf[j] += mins[j] * (a_ptr[l].bsums[sb * 2] + a_ptr[l].bsums[sb * 2 + 1]) * GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d;
- }
- }
- }
- for (int j = 0; j < ncols_interleaved; j++) {
- s[x * ncols_interleaved + j] = sumf[j] - sum_minf[j];
- }
- }
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ ggml_gemv_q4_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc);
#endif
}
}
#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
- float sumf[4][8];
- int sumi;
-
- for (int y = 0; y < nr / 4; y++) {
- const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_0x8 * b_ptr = (const block_q4_0x8 *) vx + (x * nb);
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) sumf[m][j] = 0.0;
- }
- for (int l = 0; l < nb; l++) {
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] << 4);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF0);
- sumi += ((v0 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i]) +
- (v1 * a_ptr[l].qs[k * 4 * blocklen + m * blocklen + i + qk / 2 * 4])) >> 4;
- }
- sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * GGML_CPU_FP16_TO_FP32(a_ptr[l].d[m]);
- }
- }
- }
- }
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++)
- s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j];
- }
- }
- }
+ ggml_gemm_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
}
void ggml_gemm_q4_K_8x8_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
}
#else
-
- float sumf[4][8];
- float sum_minf[4][8];
- uint32_t utmp[32];
- int sumi1;
- int sumi2;
- int sumi;
-
- for (int y = 0; y < nr / 4; y++) {
- const block_q8_Kx4 * a_ptr = (const block_q8_Kx4 *) vy + (y * nb);
- for (int x = 0; x < nc / ncols_interleaved; x++) {
- const block_q4_Kx8 * b_ptr = (const block_q4_Kx8 *) vx + (x * nb);
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumf[m][j] = 0.0;
- sum_minf[m][j] = 0.0;
- }
- }
- for (int l = 0; l < nb; l++) {
- for (int sb = 0; sb < 8; sb++) {
- memcpy(utmp + sb * 4, b_ptr[l].scales + sb * 12, 12);
- utmp[sb * 4 + 3] = ((utmp[sb * 4 + 2] >> 4) & kmask2) | (((utmp[sb * 4 + 1] >> 6) & kmask3) << 4);
- const uint32_t uaux_0 = utmp[sb * 4 + 1] & kmask1;
- utmp[sb * 4 + 1] = (utmp[sb * 4 + 2] & kmask2) | (((utmp[sb * 4 + 0] >> 6) & kmask3) << 4);
- utmp[sb * 4 + 2] = uaux_0;
- utmp[sb * 4 + 0] &= kmask1;
- }
- for (int k = 0; k < (qk / (2 * blocklen)); k++) {
- uint8_t *scales_0 = (uint8_t*) utmp + (k / 4) * 32;
- uint8_t *scales_1 = (uint8_t*) utmp + (k / 4) * 32 + 16;
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- sumi1 = 0;
- sumi2 = 0;
- sumi = 0;
- for (int i = 0; i < blocklen; ++i) {
- const int v0 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] & 0xF);
- const int v1 = (int8_t) (b_ptr[l].qs[k * ncols_interleaved * blocklen + j * blocklen + i] >> 4);
- sumi1 = (v0 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i]);
- sumi2 = (v1 * a_ptr[l].qs[(k >> 2) * 256 + (k % 4) * 4 * blocklen + m * blocklen + i + 128]);
- sumi1 = sumi1 * scales_0[j];
- sumi2 = sumi2 * scales_1[j];
- sumi += sumi1 + sumi2;
- }
- sumf[m][j] += sumi * GGML_CPU_FP16_TO_FP32(b_ptr[l].d[j]) * a_ptr[l].d[m];
- }
- }
- }
- for (int sb = 0; sb < 8; sb++) {
- uint8_t *mins = (uint8_t*) utmp + 8 + sb * 16;
- for(int m = 0; m < 4; m++) {
- const int16_t *bsums = a_ptr[l].bsums + (sb * 8) + (m * 4) - ((sb % 2) * 6);
- for(int j = 0; j < ncols_interleaved; j++) {
- sum_minf[m][j] += mins[j] * (bsums[0] + bsums[1]) * GGML_CPU_FP16_TO_FP32(b_ptr[l].dmin[j]) * a_ptr[l].d[m];
- }
- }
- }
- }
- for (int m = 0; m < 4; m++) {
- for (int j = 0; j < ncols_interleaved; j++) {
- s[(y * 4 + m) * bs + x * ncols_interleaved + j] = sumf[m][j] - sum_minf[m][j];
- }
- }
- }
- }
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ ggml_gemm_q4_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc);
#endif
}