#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
+#if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
+#include "ggml-opencl.h"
+#endif
#elif defined(GGML_USE_OPENBLAS)
#include <cblas.h>
#elif defined(GGML_USE_CUBLAS)
#undef bool
#define bool _Bool
#else
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#include <intrin.h>
+#else
#include <immintrin.h>
#endif
#endif
+#endif
#ifdef __F16C__
#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
#define B8(c,s ) B7(c,s, c), B7(c,s, s)
-// precomputed tables for expanding 8bits to 8 bytes (shl 4)
-static const uint64_t table_b2b_u[1 << 8] = { B8(00, 10) };
+// precomputed tables for expanding 8 bits to 8 bytes:
+static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
+static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
#endif
// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
// quantization
//
-#if __AVX__ || __AVX2__ || __AVX512F__
-// Unpack 16 4-bit fields into 16 bytes
-// The output vector contains 16 bytes, each one in [ 0 .. 15 ] interval
-static inline __m128i bytes_from_nibbles_16(const uint8_t * rsi)
-{
- // Load 8 bytes from memory
- __m128i tmp = _mm_loadl_epi64( ( const __m128i* )rsi );
-
- // Expand bytes into uint16_t values
- __m128i bytes = _mm_cvtepu8_epi16( tmp );
-
- // Unpack values into individual bytes
- const __m128i lowMask = _mm_set1_epi8( 0xF );
- __m128i high = _mm_andnot_si128( lowMask, bytes );
- __m128i low = _mm_and_si128( lowMask, bytes );
- high = _mm_slli_epi16( high, 4 );
- bytes = _mm_or_si128( low, high );
- return bytes;
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
+// multiply int8_t, add results pairwise twice
+static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
+ // Get absolute values of x vectors
+ const __m128i ax = _mm_sign_epi8(x, x);
+ // Sign the values of the y vectors
+ const __m128i sy = _mm_sign_epi8(y, x);
+ // Perform multiplication and create 16-bit values
+ const __m128i dot = _mm_maddubs_epi16(ax, sy);
+ const __m128i ones = _mm_set1_epi16(1);
+ return _mm_madd_epi16(ones, dot);
}
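// [editor's note, not part of the patch] Why the sign/madd pair works:
// _mm_maddubs_epi16 multiplies an *unsigned* byte vector by a *signed* one and adds
// adjacent pairs, so the signed x is split into |x| (via _mm_sign_epi8(x, x)) and its
// sign, which is transferred onto y (via _mm_sign_epi8(y, x)):
//   |x[i]| * (sign(x[i]) * y[i]) == x[i] * y[i]
// The final _mm_madd_epi16 with a vector of ones adds adjacent 16-bit products,
// leaving four 32-bit partial sums per 128-bit register.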
+#if __AVX__ || __AVX2__ || __AVX512F__
// horizontally add 8 floats
static inline float hsum_float_8(const __m256 x) {
__m128 res = _mm256_extractf128_ps(x, 1);
uint32_t x32;
memcpy(&x32, x, sizeof(uint32_t));
const __m256i shuf_mask = _mm256_set_epi64x(
- 0x0303030303030303, 0x0202020202020202,
- 0x0101010101010101, 0x0000000000000000);
+ 0x0303030303030303, 0x0202020202020202,
+ 0x0101010101010101, 0x0000000000000000);
__m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
bytes = _mm256_or_si256(bytes, bit_mask);
// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
{
- // Load 16 bytes from memory
- __m128i tmp = _mm_loadu_si128( ( const __m128i* )rsi );
-
- // Expand bytes into uint16_t values
- __m256i bytes = _mm256_cvtepu8_epi16( tmp );
-
- // Unpack values into individual bytes
+ const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
+ const __m256i bytes = _mm256_set_m128i(_mm_srli_epi16(tmp, 4), tmp);
const __m256i lowMask = _mm256_set1_epi8( 0xF );
- __m256i high = _mm256_andnot_si256( lowMask, bytes );
- __m256i low = _mm256_and_si256( lowMask, bytes );
- high = _mm256_slli_epi16( high, 4 );
- bytes = _mm256_or_si256( low, high );
- return bytes;
+ return _mm256_and_si256(lowMask, bytes);
}
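// [editor's note, not part of the patch] With this rewrite the low nibbles of the 16
// input bytes land in output bytes 0..15 and the high nibbles in bytes 16..31. That
// matches the new block layout below, where qs[j] stores element j in its low nibble
// and element j + qk/2 in its high nibble, so no interleaving shuffle is needed.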
// add int16_t pairwise and return as float vector
return _mm_packus_epi16( bytes1, bytes2);
}
#endif
+#elif defined(__SSSE3__)
+// horizontally add 4x4 floats
+static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
+ __m128 res_0 =_mm_hadd_ps(a, b);
+ __m128 res_1 =_mm_hadd_ps(c, d);
+ __m128 res =_mm_hadd_ps(res_0, res_1);
+ res =_mm_hadd_ps(res, res);
+ res =_mm_hadd_ps(res, res);
+
+ return _mm_cvtss_f32(res);
+}
#endif // __AVX__ || __AVX2__ || __AVX512F__
+#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
#if __ARM_NEON
MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
}
-int8x8_t vzip1_s8(int8x8_t a, int8x8_t b) {
- int8x8_t res;
-
- res[0] = a[0]; res[1] = b[0];
- res[2] = a[1]; res[3] = b[1];
- res[4] = a[2]; res[5] = b[2];
- res[6] = a[3]; res[7] = b[3];
-
- return res;
-}
-
-int8x8_t vzip2_s8(int8x8_t a, int8x8_t b) {
- int8x8_t res;
-
- res[0] = a[4]; res[1] = b[4];
- res[2] = a[5]; res[3] = b[5];
- res[4] = a[6]; res[5] = b[6];
- res[6] = a[7]; res[7] = b[7];
-
- return res;
-}
-
-uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) {
- uint8x8_t res;
-
- res[0] = a[0]; res[1] = b[0];
- res[2] = a[1]; res[3] = b[1];
- res[4] = a[2]; res[5] = b[2];
- res[6] = a[3]; res[7] = b[3];
-
- return res;
-}
-
-uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b) {
- uint8x8_t res;
-
- res[0] = a[4]; res[1] = b[4];
- res[2] = a[5]; res[3] = b[5];
- res[4] = a[6]; res[5] = b[6];
- res[6] = a[7]; res[7] = b[7];
-
- return res;
-}
-
-int8x16_t vzip1q_s8(int8x16_t a, int8x16_t b) {
- int8x16_t res;
-
- res[0] = a[0]; res[1] = b[0]; res[2] = a[1]; res[3] = b[1];
- res[4] = a[2]; res[5] = b[2]; res[6] = a[3]; res[7] = b[3];
- res[8] = a[4]; res[9] = b[4]; res[10] = a[5]; res[11] = b[5];
- res[12] = a[6]; res[13] = b[6]; res[14] = a[7]; res[15] = b[7];
-
- return res;
-}
-
-int8x16_t vzip2q_s8(int8x16_t a, int8x16_t b) {
- int8x16_t res;
-
- res[0] = a[8]; res[1] = b[8]; res[2] = a[9]; res[3] = b[9];
- res[4] = a[10]; res[5] = b[10]; res[6] = a[11]; res[7] = b[11];
- res[8] = a[12]; res[9] = b[12]; res[10] = a[13]; res[11] = b[13];
- res[12] = a[14]; res[13] = b[14]; res[14] = a[15]; res[15] = b[15];
-
- return res;
-}
-
-uint8x16_t vzip1q_u8(uint8x16_t a, uint8x16_t b) {
- uint8x16_t res;
-
- res[0] = a[0]; res[1] = b[0]; res[2] = a[1]; res[3] = b[1];
- res[4] = a[2]; res[5] = b[2]; res[6] = a[3]; res[7] = b[3];
- res[8] = a[4]; res[9] = b[4]; res[10] = a[5]; res[11] = b[5];
- res[12] = a[6]; res[13] = b[6]; res[14] = a[7]; res[15] = b[7];
-
- return res;
-}
-
-uint8x16_t vzip2q_u8(uint8x16_t a, uint8x16_t b) {
- uint8x16_t res;
-
- res[0] = a[8]; res[1] = b[8]; res[2] = a[9]; res[3] = b[9];
- res[4] = a[10]; res[5] = b[10]; res[6] = a[11]; res[7] = b[11];
- res[8] = a[12]; res[9] = b[12]; res[10] = a[13]; res[11] = b[13];
- res[12] = a[14]; res[13] = b[14]; res[14] = a[15]; res[15] = b[15];
-
- return res;
-}
-
int32x4_t vcvtnq_s32_f32(float32x4_t v) {
int32x4_t res;
} block_q4_1;
static_assert(sizeof(block_q4_1) == 2 * sizeof(float) + QK4_1 / 2, "wrong q4_1 block size/padding");
-#define QK4_2 16
-typedef struct {
- ggml_fp16_t d; // delta
- uint8_t qs[QK4_2 / 2]; // nibbles / quants
-} block_q4_2;
-static_assert(sizeof(block_q4_2) == sizeof(ggml_fp16_t) + QK4_2 / 2, "wrong q4_2 block size/padding");
-
#define QK5_0 32
typedef struct {
ggml_fp16_t d; // delta
#define QK8_1 32
typedef struct {
- float d; // delta
- float s0; // d * sum(qs[i]) low
- float s1; // d * sum(qs[i]) high
- int8_t qs[QK8_1]; // quants
+ float d; // delta
+ float s; // d * sum(qs[i])
+ int8_t qs[QK8_1]; // quants
} block_q8_1;
-static_assert(sizeof(block_q8_1) == 3*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
+static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
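// [editor's note, not part of the patch] s = d * sum(qs) is precomputed so the
// q4_1/q5_1 dot products can fold in the offset term without rescanning the block:
//   dot(x, y) = d_x*d_y * sum(qx[i]*qy[i]) + m_x * (d_y * sum(qy[i]))
//             = d_x*d_y * sum(qx[i]*qy[i]) + m_x * y->s
// The per-half sums s0/s1 of the old layout are replaced by a single whole-block s,
// which is all the updated dot products below use.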
// reference implementation for deterministic creation of model files
static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
- assert(k % QK4_0 == 0);
- const int nb = k / QK4_0;
+ static const int qk = QK4_0;
+
+ assert(k % qk == 0);
- uint8_t pp[QK4_0/2];
+ const int nb = k / qk;
for (int i = 0; i < nb; i++) {
float amax = 0.0f; // absolute max
- float max = 0.0f;
+ float max = 0.0f;
- for (int l = 0; l < QK4_0; l++) {
- const float v = x[i*QK4_0 + l];
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
if (amax < fabsf(v)) {
amax = fabsf(v);
- max = v;
+ max = v;
}
}
- const float d = max / -8;
+ const float d = max / -8;
const float id = d ? 1.0f/d : 0.0f;
y[i].d = d;
- for (int l = 0; l < QK4_0; l += 2) {
- const float v0 = x[i*QK4_0 + l + 0]*id;
- const float v1 = x[i*QK4_0 + l + 1]*id;
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = x[i*qk + 0 + j]*id;
+ const float x1 = x[i*qk + qk/2 + j]*id;
+
+ const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
+ const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
+
+ y[i].qs[j] = xi0;
+ y[i].qs[j] |= xi1 << 4;
+ }
+ }
+}
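// [editor's note, not part of the patch] A worked example of the rounding above,
// assuming a block whose largest-magnitude value is max = -2.0f: d = max/-8 = 0.25,
// id = 4. A source value of 1.0f gives x0 = 4.0, xi0 = (int8_t)(4.0 + 8.5f) = 12,
// which dequantizes back to (12 - 8)*d = 1.0f. The +8.5f applies the +8 offset and
// rounds to nearest in one step; MIN(15, ...) guards the upper edge of the nibble
// range. Also note the new packing: qs[j] holds element j in its low nibble and
// element j + qk/2 in its high nibble, replacing the old adjacent-pair packing.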
+
+static void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q4_0_reference(x, y, k);
+}
+
+static void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
+ const int qk = QK4_1;
+
+ assert(k % qk == 0);
- const uint8_t vi0 = MIN(15, (int8_t)roundf(v0) + 8);
- const uint8_t vi1 = MIN(15, (int8_t)roundf(v1) + 8);
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ float min = FLT_MAX;
+ float max = -FLT_MAX;
- assert(vi0 < 16);
- assert(vi1 < 16);
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
- pp[l/2] = vi0 | (vi1 << 4);
+ if (v < min) min = v;
+ if (v > max) max = v;
}
- memcpy(y[i].qs, pp, sizeof(pp));
+ const float d = (max - min) / ((1 << 4) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = d;
+ y[i].m = min;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = (x[i*qk + 0 + j] - min)*id;
+ const float x1 = (x[i*qk + qk/2 + j] - min)*id;
+
+ const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
+ const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
+
+ y[i].qs[j] = xi0;
+ y[i].qs[j] |= xi1 << 4;
+ }
}
}
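// [editor's note, not part of the patch] q4_1 is the asymmetric variant:
// q = round((x - min)/d) with d = (max - min)/15, so dequantization is x ~= q*d + min.
// The same split-halves nibble packing as q4_0 applies.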
-static void quantize_row_q4_0(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK4_0 == 0);
- const int nb = k / QK4_0;
+static void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q4_1_reference(x, y, k);
+}
+
+static void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
+ static const int qk = QK5_0;
- block_q4_0 * restrict y = vy;
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+ float max = 0.0f;
+
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
+ if (amax < fabsf(v)) {
+ amax = fabsf(v);
+ max = v;
+ }
+ }
+
+ const float d = max / -16;
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ uint32_t qh = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = x[i*qk + 0 + j]*id;
+ const float x1 = x[i*qk + qk/2 + j]*id;
+
+ const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
+ const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
+
+ y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
+
+ // get the 5-th bit and store it in qh at the right position
+ qh |= ((xi0 & 0x10) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
+ }
+
+ memcpy(&y[i].qh, &qh, sizeof(qh));
+ }
+}
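// [editor's note, not part of the patch] In q5_0 the four low bits of each element
// stay in qs[] (same split-halves packing as q4_0) while the fifth bit is gathered
// into the 32-bit qh: bit j of qh is the high bit of element j, and bit j + qk/2 is
// the high bit of element j + qk/2. Dequantization reassembles
// ((qs & 0x0F) | (fifth bit << 4)) - 16, as in dequantize_row_q5_0 further down.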
+
+static void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q5_0_reference(x, y, k);
+}
+
+static void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
+ const int qk = QK5_1;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ float min = FLT_MAX;
+ float max = -FLT_MAX;
+
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
+
+ if (v < min) min = v;
+ if (v > max) max = v;
+ }
+
+ const float d = (max - min) / ((1 << 5) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+ y[i].m = GGML_FP32_TO_FP16(min);
+
+ uint32_t qh = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = (x[i*qk + 0 + j] - min)*id;
+ const float x1 = (x[i*qk + qk/2 + j] - min)*id;
+
+ const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
+ const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
+
+ y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
+
+ // get the 5-th bit and store it in qh at the right position
+ qh |= ((xi0 & 0x10) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
+ }
+
+ memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
+ }
+}
+
+static void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q5_1_reference(x, y, k);
+}
+
+// reference implementation for deterministic creation of model files
+static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
+ assert(k % QK8_0 == 0);
+ const int nb = k / QK8_0;
-#if defined(__POWER9_VECTOR__)
- const vector float v85 = vec_splats(8.5f);
- const vector signed int v15 = vec_splats(15);
for (int i = 0; i < nb; i++) {
- float max = 0.0f;
- float min = 0.0f;
-
- vector float asrcv [8];
- vector float srcv [8];
- vector float maxv[8];
- vector float minv[8];
-
- for (int l = 0; l < 8; l++) srcv[l] = *(vector float *)(x + i*32 + 4*l);
- //for (int l = 0; l < 8; l++) asrcv[l] = vec_abs(srcv[l]);
-
- for (int l = 0; l < 4; l++) maxv[2*l] = vec_max(asrcv[2*l], asrcv[2*l+1]);
- //for (int l = 0; l < 2; l++) maxv[4*l] = vec_max(maxv[4*l], maxv[4*l+2]);
- maxv[0] = vec_max(maxv[0], maxv[2]);
- maxv[4] = vec_max(maxv[4], maxv[6]);
- //for (int l = 0; l < 1; l++) maxv[8*l] = vec_max(maxv[8*l], maxv[8*l+4]);
- maxv[0] = vec_max(maxv[0], maxv[4]);
-
- for (int l = 0; l < 4; l++) minv[2*l] = vec_min(asrcv[2*l], asrcv[2*l+1]);
- //for (int l = 0; l < 2; l++) minv[4*l] = vec_min(minv[4*l], minv[4*l+2]);
- minv[0] = vec_min(minv[0], minv[2]);
- minv[4] = vec_min(minv[4], minv[6]);
- //for (int l = 0; l < 1; l++) minv[8*l] = vec_min(minv[8*l], minv[8*l+4]);
- minv[0] = vec_min(minv[0], minv[4]);
-
-
- max = MAX(
- MAX(vec_extract(maxv[0], 0), vec_extract(maxv[0], 1)),
- MAX(vec_extract(maxv[0], 2), vec_extract(maxv[0], 3)));
- min = MIN(
- MIN(vec_extract(minv[0], 0), vec_extract(minv[0], 1)),
- MIN(vec_extract(minv[0], 2), vec_extract(minv[0], 3)));
-
- const float magnitude = max >= fabsf(min) ? max : min;
- const float d = magnitude / -8;
- const float id = d ? 1.0/d : 0.0;
+ float amax = 0.0f; // absolute max
+
+ for (int j = 0; j < QK8_0; j++) {
+ const float v = x[i*QK8_0 + j];
+ amax = MAX(amax, fabsf(v));
+ }
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
y[i].d = d;
- const vector float vid = vec_splats(id);
- uint8_t * restrict pb = y[i].qs;
- for (int l = 0; l < 8; l++) {
- const vector float vf = vec_madd(srcv[l], vid, v85);
- const vector signed int vi = vec_signed(vf);
- const vector signed int vc = vec_min(vi, v15);
+ for (int j = 0; j < QK8_0; ++j) {
+ const float x0 = x[i*QK8_0 + j]*id;
- pb[2*l + 0] = vec_extract(vc, 0) | (vec_extract(vc, 1) << 4);
- pb[2*l + 1] = vec_extract(vc, 2) | (vec_extract(vc, 3) << 4);
+ y[i].qs[j] = roundf(x0);
}
}
-#elif __ARM_NEON
+}
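// [editor's note, not part of the patch] q8_0 is the symmetric 8-bit case:
// d = amax/127 and q = round(x/d), so quants span [-127, 127] and x ~= q*d.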
+
+static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
+ assert(QK8_0 == 32);
+ assert(k % QK8_0 == 0);
+ const int nb = k / QK8_0;
+
+ block_q8_0 * restrict y = vy;
+
+#if defined(__ARM_NEON)
for (int i = 0; i < nb; i++) {
float32x4_t srcv [8];
- float32x4_t maxv[8];
- float32x4_t minv[8];
-
- for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*32 + 4*l);
+ float32x4_t asrcv[8];
+ float32x4_t amaxv[8];
- for (int l = 0; l < 4; l++) maxv[2*l] = vmaxq_f32(srcv[2*l], srcv[2*l+1]);
- for (int l = 0; l < 2; l++) maxv[4*l] = vmaxq_f32(maxv[4*l], maxv[4*l+2]);
- for (int l = 0; l < 1; l++) maxv[8*l] = vmaxq_f32(maxv[8*l], maxv[8*l+4]);
+ for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
- for (int l = 0; l < 4; l++) minv[2*l] = vminq_f32(srcv[2*l], srcv[2*l+1]);
- for (int l = 0; l < 2; l++) minv[4*l] = vminq_f32(minv[4*l], minv[4*l+2]);
- for (int l = 0; l < 1; l++) minv[8*l] = vminq_f32(minv[8*l], minv[8*l+4]);
+ for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
- const float max = vmaxvq_f32(maxv[0]);
- const float min = vminvq_f32(minv[0]);
+ const float amax = vmaxvq_f32(amaxv[0]);
- const float magnitude = max >= fabsf(min) ? max : min;
- const float d = magnitude / -8;
+ const float d = amax / ((1 << 7) - 1);
const float id = d ? 1.0f/d : 0.0f;
y[i].d = d;
- for (int l = 0; l < 8; l++) {
- const float32x4_t v = vmulq_n_f32(srcv[l], id);
- const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(8.5f));
- const int32x4_t vi = vcvtq_s32_f32(vf);
- const int32x4_t vc = vminq_s32(vi, vdupq_n_s32(15));
+ for (int j = 0; j < 8; j++) {
+ const float32x4_t v = vmulq_n_f32(srcv[j], id);
+ const int32x4_t vi = vcvtnq_s32_f32(v);
- y[i].qs[2*l + 0] = vgetq_lane_s32(vc, 0) | (vgetq_lane_s32(vc, 1) << 4);
- y[i].qs[2*l + 1] = vgetq_lane_s32(vc, 2) | (vgetq_lane_s32(vc, 3) << 4);
+ y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
+ y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
+ y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
+ y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
}
}
-#elif defined(__AVX2__)
+#elif defined(__AVX2__) || defined(__AVX__)
for (int i = 0; i < nb; i++) {
// Load elements into 4 AVX vectors
__m256 v0 = _mm256_loadu_ps( x );
__m256 v3 = _mm256_loadu_ps( x + 24 );
x += 32;
- // Compute max for the block
- __m256 max = _mm256_max_ps( v0, v1 );
- __m256 maxTmp = _mm256_max_ps( v2, v3 );
- max = _mm256_max_ps( max, maxTmp );
+ // Compute max(abs(e)) for the block
+ const __m256 signBit = _mm256_set1_ps( -0.0f );
+ __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
- __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( max, 1 ), _mm256_castps256_ps128( max ) );
+ __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
const float maxScalar = _mm_cvtss_f32( max4 );
- // Compute min for the block
- __m256 min = _mm256_min_ps( v0, v1 );
- __m256 minTmp = _mm256_min_ps( v2, v3 );
- min = _mm256_min_ps( min, minTmp );
-
- __m128 min4 = _mm_min_ps( _mm256_extractf128_ps( min, 1 ), _mm256_castps256_ps128( min ) );
- min4 = _mm_min_ps( min4, _mm_movehl_ps( min4, min4 ) );
- min4 = _mm_min_ss( min4, _mm_movehdup_ps( min4 ) );
- const float minScalar = _mm_cvtss_f32( min4 );
-
// Quantize these floats
- const float magnitude = maxScalar >= fabsf(minScalar) ? maxScalar : minScalar;
- const float d = magnitude / -8.0f;
+ const float d = maxScalar / 127.f;
y[i].d = d;
- const float id = ( magnitude != 0.0f ) ? -8.0f / magnitude : 0.0f;
+ const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
const __m256 mul = _mm256_set1_ps( id );
// Apply the multiplier
__m256i i2 = _mm256_cvtps_epi32( v2 );
__m256i i3 = _mm256_cvtps_epi32( v3 );
+#if defined(__AVX2__)
// Convert int32 to int16
i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
i0 = _mm256_permutevar8x32_epi32( i0, perm );
- // Apply offset and clamp to translate the range from [ -8 .. +8 ] into [ +0 .. +15 ]
- const __m256i off = _mm256_set1_epi8( 8 );
- i0 = _mm256_add_epi8( i0, off );
- const __m256i maxNibble = _mm256_set1_epi8( 15 );
- i0 = _mm256_min_epi8( i0, maxNibble );
-
- // Compress the vector into 4 bit/value, and store
- __m128i res = packNibbles( i0 );
- _mm_storeu_si128( ( __m128i* )y[i].qs, res );
- }
-#elif defined(__AVX__)
- for (int i = 0; i < nb; i++) {
- // Load elements into 4 AVX vectors
- __m256 v0 = _mm256_loadu_ps( x );
- __m256 v1 = _mm256_loadu_ps( x + 8 );
- __m256 v2 = _mm256_loadu_ps( x + 16 );
- __m256 v3 = _mm256_loadu_ps( x + 24 );
- x += 32;
-
- // Compute max for the block
- __m256 max = _mm256_max_ps( v0, v1 );
- __m256 maxTmp = _mm256_max_ps( v2, v3 );
- max = _mm256_max_ps( max, maxTmp );
-
- __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( max, 1 ), _mm256_castps256_ps128( max ) );
- max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
- max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
- const float maxScalar = _mm_cvtss_f32( max4 );
-
- // Compute min for the block
- __m256 min = _mm256_min_ps( v0, v1 );
- __m256 minTmp = _mm256_min_ps( v2, v3 );
- min = _mm256_min_ps( min, minTmp );
-
- __m128 min4 = _mm_min_ps( _mm256_extractf128_ps( min, 1 ), _mm256_castps256_ps128( min ) );
- min4 = _mm_min_ps( min4, _mm_movehl_ps( min4, min4 ) );
- min4 = _mm_min_ss( min4, _mm_movehdup_ps( min4 ) );
- const float minScalar = _mm_cvtss_f32( min4 );
-
- // Quantize these floats
- const float magnitude = maxScalar >= fabsf(minScalar) ? maxScalar : minScalar;
- const float d = magnitude / -8.0f;
- y[i].d = d;
- const float id = ( magnitude != 0.0f ) ? -8.0f / magnitude : 0.0f;
- const __m256 mul = _mm256_set1_ps( id );
-
- // Apply the multiplier
- v0 = _mm256_mul_ps( v0, mul );
- v1 = _mm256_mul_ps( v1, mul );
- v2 = _mm256_mul_ps( v2, mul );
- v3 = _mm256_mul_ps( v3, mul );
-
- // Round to nearest integer
- v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
- v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
- v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
- v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
-
- // Convert floats to integers
- __m256i i0 = _mm256_cvtps_epi32( v0 );
- __m256i i1 = _mm256_cvtps_epi32( v1 );
- __m256i i2 = _mm256_cvtps_epi32( v2 );
- __m256i i3 = _mm256_cvtps_epi32( v3 );
-
+ _mm256_storeu_si256((__m256i *)y[i].qs, i0);
+#else
// Since we don't have in AVX some necessary functions,
// we split the registers in half and call AVX2 analogs from SSE
__m128i ni0 = _mm256_castsi256_si128( i0 );
ni0 = _mm_packs_epi16( ni0, ni2 );
ni4 = _mm_packs_epi16( ni4, ni6 );
- // Apply offset and clamp to translate the range from [ -8 .. +8 ] into [ +0 .. +15 ]
- const __m128i off = _mm_set1_epi8( 8 );
- ni0 = _mm_add_epi8( ni0, off );
- ni4 = _mm_add_epi8( ni4, off );
- const __m128i maxNibble = _mm_set1_epi8( 15 );
- ni0 = _mm_min_epi8( ni0, maxNibble );
- ni4 = _mm_min_epi8( ni4, maxNibble );
-
- // Compress the vector into 4 bit/value, and store
- __m128i res = packNibbles( ni0, ni4 );
- _mm_storeu_si128( ( __m128i* )y[i].qs, res );
+ _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
+ _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
+#endif
}
-#elif defined(__wasm_simd128__)
- for (int i = 0; i < nb; i++) {
- float max = 0.0f;
- float min = 0.0f;
+#else
+ // scalar
+ quantize_row_q8_0_reference(x, y, k);
+#endif
+}
- v128_t srcv [8];
- v128_t maxv[8];
- v128_t minv[8];
-
- for (int l = 0; l < 8; l++) srcv[l] = wasm_v128_load(x + i*32 + 4*l);
-
- for (int l = 0; l < 4; l++) maxv[2*l] = wasm_f32x4_max(srcv[2*l], srcv[2*l+1]);
- for (int l = 0; l < 2; l++) maxv[4*l] = wasm_f32x4_max(maxv[4*l], maxv[4*l+2]);
- for (int l = 0; l < 1; l++) maxv[8*l] = wasm_f32x4_max(maxv[8*l], maxv[8*l+4]);
-
- for (int l = 0; l < 4; l++) minv[2*l] = wasm_f32x4_min(srcv[2*l], srcv[2*l+1]);
- for (int l = 0; l < 2; l++) minv[4*l] = wasm_f32x4_min(minv[4*l], minv[4*l+2]);
- for (int l = 0; l < 1; l++) minv[8*l] = wasm_f32x4_min(minv[8*l], minv[8*l+4]);
-
- max = MAX(
- MAX(wasm_f32x4_extract_lane(maxv[0], 0), wasm_f32x4_extract_lane(maxv[0], 1)),
- MAX(wasm_f32x4_extract_lane(maxv[0], 2), wasm_f32x4_extract_lane(maxv[0], 3)));
- min = MIN(
- MIN(wasm_f32x4_extract_lane(minv[0], 0), wasm_f32x4_extract_lane(minv[0], 1)),
- MIN(wasm_f32x4_extract_lane(minv[0], 2), wasm_f32x4_extract_lane(minv[0], 3)));
-
- const float magnitude = max >= fabsf(min) ? max : min;
- const float d = magnitude / -8;
- const float id = d ? 1.0/d : 0.0;
-
- y[i].d = d;
-
- for (int l = 0; l < 8; l++) {
- const v128_t v = wasm_f32x4_mul(srcv[l], wasm_f32x4_splat(id));
- const v128_t vf = wasm_f32x4_add(v, wasm_f32x4_splat(8.5f));
- const v128_t vi = wasm_i32x4_trunc_sat_f32x4(vf);
- const v128_t vc = wasm_i32x4_min(vi, wasm_i32x4_splat(15));
-
- y[i].qs[2*l + 0] = wasm_i32x4_extract_lane(vc, 0) | (wasm_i32x4_extract_lane(vc, 1) << 4);
- y[i].qs[2*l + 1] = wasm_i32x4_extract_lane(vc, 2) | (wasm_i32x4_extract_lane(vc, 3) << 4);
- }
- }
-#else
- // scalar
- quantize_row_q4_0_reference(x, y, k);
-#endif
-}
-
-static void quantize_row_q4_1_reference(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK4_1 == 0);
- const int nb = k / QK4_1;
-
- block_q4_1 * restrict y = vy;
-
- uint8_t pp[QK4_1/2];
-
- for (int i = 0; i < nb; i++) {
- float min = FLT_MAX;
- float max = -FLT_MAX;
-
- for (int l = 0; l < QK4_1; l++) {
- const float v = x[i*QK4_1 + l];
- if (v < min) min = v;
- if (v > max) max = v;
- }
-
- const float d = (max - min) / ((1 << 4) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = d;
- y[i].m = min;
-
- for (int l = 0; l < QK4_1; l += 2) {
- const float v0 = (x[i*QK4_1 + l + 0] - min)*id;
- const float v1 = (x[i*QK4_1 + l + 1] - min)*id;
-
- const uint8_t vi0 = roundf(v0);
- const uint8_t vi1 = roundf(v1);
-
- assert(vi0 < 16);
- assert(vi1 < 16);
-
- pp[l/2] = vi0 | (vi1 << 4);
- }
-
- memcpy(y[i].qs, pp, sizeof(pp));
- }
-}
-
-static void quantize_row_q4_1(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK4_1 == 0);
-
- const int nb = k / QK4_1;
-
- block_q4_1 * restrict y = vy;
-
-#if defined(__AVX2__)
- for (int i = 0; i < nb; i++) {
- // Load elements into 4 AVX vectors
- __m256 v0 = _mm256_loadu_ps( x );
- __m256 v1 = _mm256_loadu_ps( x + 8 );
- __m256 v2 = _mm256_loadu_ps( x + 16 );
- __m256 v3 = _mm256_loadu_ps( x + 24 );
- x += 32;
-
- // Compute max for the block
- __m256 vmax;
- vmax = _mm256_max_ps( v0, v1 );
- vmax = _mm256_max_ps( vmax, v2 );
- vmax = _mm256_max_ps( vmax, v3 );
-
- __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( vmax, 1 ), _mm256_castps256_ps128( vmax ) );
- max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
- max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
- const float maxScalar = _mm_cvtss_f32( max4 );
-
- // Compute min for the block
- __m256 vmin;
- vmin = _mm256_min_ps( v0, v1 );
- vmin = _mm256_min_ps( vmin, v2 );
- vmin = _mm256_min_ps( vmin, v3 );
-
- __m128 min4 = _mm_min_ps( _mm256_extractf128_ps( vmin, 1 ), _mm256_castps256_ps128( vmin ) );
- min4 = _mm_min_ps( min4, _mm_movehl_ps( min4, min4 ) );
- min4 = _mm_min_ss( min4, _mm_movehdup_ps( min4 ) );
- const float minScalar = _mm_cvtss_f32( min4 );
-
- // Quantize these floats
- const float d = (maxScalar - minScalar) / ((1 << 4) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].m = minScalar;
- y[i].d = d;
-
- // x = (x-min)*id
- const __m256 mul = _mm256_set1_ps( id );
- const __m256 off = _mm256_set1_ps( minScalar );
- v0 = _mm256_mul_ps( _mm256_sub_ps( v0, off ), mul );
- v1 = _mm256_mul_ps( _mm256_sub_ps( v1, off ), mul );
- v2 = _mm256_mul_ps( _mm256_sub_ps( v2, off ), mul );
- v3 = _mm256_mul_ps( _mm256_sub_ps( v3, off ), mul );
-
- // Round to nearest integer
- v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
- v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
- v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
- v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
-
- // Convert floats to integers
- __m256i i0 = _mm256_cvtps_epi32( v0 );
- __m256i i1 = _mm256_cvtps_epi32( v1 );
- __m256i i2 = _mm256_cvtps_epi32( v2 );
- __m256i i3 = _mm256_cvtps_epi32( v3 );
-
- // Convert int32 to int16
- i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
- i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
- // Convert int16 to int8
- i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
-
- // We got our precious signed bytes, but the order is now wrong
- // These AVX2 pack instructions process 16-byte pieces independently
- // The following instruction is fixing the order
- const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
- i0 = _mm256_permutevar8x32_epi32( i0, perm );
-
- // Compress the vector into 4 bit/value, and store
- __m128i res = packNibbles( i0 );
- _mm_storeu_si128( ( __m128i* )y[i].qs, res );
- }
-#elif __ARM_NEON
- for (int i = 0; i < nb; i++) {
- float32x4_t srcv[8];
- float32x4_t minv[8];
- float32x4_t maxv[8];
-
- for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*QK4_1 + 4*l);
-
- for (int l = 0; l < 4; l++) minv[2*l] = vminq_f32(srcv[2*l], srcv[2*l + 1]);
- for (int l = 0; l < 2; l++) minv[4*l] = vminq_f32(minv[4*l], minv[4*l + 2]);
- for (int l = 0; l < 1; l++) minv[8*l] = vminq_f32(minv[8*l], minv[8*l + 4]);
-
- for (int l = 0; l < 4; l++) maxv[2*l] = vmaxq_f32(srcv[2*l], srcv[2*l + 1]);
- for (int l = 0; l < 2; l++) maxv[4*l] = vmaxq_f32(maxv[4*l], maxv[4*l + 2]);
- for (int l = 0; l < 1; l++) maxv[8*l] = vmaxq_f32(maxv[8*l], maxv[8*l + 4]);
-
- const float min = vminvq_f32(minv[0]);
- const float max = vmaxvq_f32(maxv[0]);
-
- const float d = (max - min) / ((1 << 4) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = d;
- y[i].m = min;
-
- const float32x4_t minv0 = vdupq_n_f32(min);
-
- for (int l = 0; l < 8; l++) {
- const float32x4_t v = vmulq_n_f32(vsubq_f32(srcv[l], minv0), id);
- const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(0.5f)); // needed to round to nearest
- const int32x4_t vi = vcvtq_s32_f32(vf);
-
- y[i].qs[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4);
- y[i].qs[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4);
- }
- }
-#else
- // scalar
- quantize_row_q4_1_reference(x, vy, k);
-#endif
-}
-
-// reference implementation for deterministic creation of model files
-static void quantize_row_q4_2_reference(const float * restrict x, block_q4_2 * restrict y, int k) {
- assert(k % QK4_2 == 0);
-
- const int nb = k / QK4_2;
-
- for (int i = 0; i < nb; i++) {
- float amax = 0.0f; // absolute max
- float max = 0.0f;
-
- for (int l = 0; l < QK4_2; l++) {
- const float v = x[i*QK4_2 + l];
- if (amax < fabsf(v)) {
- amax = fabsf(v);
- max = v;
- }
- }
-
- const float d = max / -8;
-
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = GGML_FP32_TO_FP16(d);
-
- for (int l = 0; l < QK4_2; l += 2) {
- const float v0 = x[i*QK4_2 + l + 0]*id;
- const float v1 = x[i*QK4_2 + l + 1]*id;
-
- const uint8_t vi0 = MIN(15, (uint8_t)(v0 + 8.5f));
- const uint8_t vi1 = MIN(15, (uint8_t)(v1 + 8.5f));
-
- assert(vi0 < 16);
- assert(vi1 < 16);
-
- y[i].qs[l/2] = vi0 | (vi1 << 4);
- }
- }
-}
-
-static void quantize_row_q4_2(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK4_2 == 0);
-
- block_q4_2 * restrict y = vy;
-
- quantize_row_q4_2_reference(x, y, k);
-}
-
-static void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
- assert(k % QK5_0 == 0);
- const int nb = k / QK5_0;
-
- for (int i = 0; i < nb; i++) {
- float amax = 0.0f; // absolute max
- float max = 0.0f;
-
- for (int l = 0; l < QK5_0; l++) {
- const float v = x[i*QK5_0 + l];
- if (amax < fabsf(v)) {
- amax = fabsf(v);
- max = v;
- }
- }
-
- const float d = max / -16;
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = GGML_FP32_TO_FP16(d);
-
- uint32_t qh = 0;
-
- for (int l = 0; l < QK5_0; l += 2) {
- const float v0 = x[i*QK5_0 + l + 0]*id;
- const float v1 = x[i*QK5_0 + l + 1]*id;
-
- const uint32_t vi0 = MIN(31, (int) (v0 + 16.5f));
- const uint32_t vi1 = MIN(31, (int) (v1 + 16.5f));
-
- y[i].qs[l/2] = (vi0 & 0x0F) | ((vi1 & 0x0F) << 4);
-
- // get the 5-th bit and store it in qh at the right position
- qh |= ((vi0 & 0x10) >> 4) << (l + 0);
- qh |= ((vi1 & 0x10) >> 4) << (l + 1);
- }
-
- memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
- }
-}
-
-static void quantize_row_q5_0(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK5_0 == 0);
-
- block_q5_0 * restrict y = vy;
-
- quantize_row_q5_0_reference(x, y, k);
-}
-
-static void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
- assert(k % QK5_1 == 0);
- const int nb = k / QK5_1;
-
- for (int i = 0; i < nb; i++) {
- float min = FLT_MAX;
- float max = -FLT_MAX;
-
- for (int l = 0; l < QK5_1; l++) {
- const float v = x[i*QK5_1 + l];
- if (v < min) min = v;
- if (v > max) max = v;
- }
-
- const float d = (max - min) / ((1 << 5) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = GGML_FP32_TO_FP16(d);
- y[i].m = GGML_FP32_TO_FP16(min);
-
- uint32_t qh = 0;
-
- for (int l = 0; l < QK5_1; l += 2) {
- const float v0 = (x[i*QK5_1 + l + 0] - min)*id;
- const float v1 = (x[i*QK5_1 + l + 1] - min)*id;
-
- const uint32_t vi0 = (int) (v0 + 0.5f);
- const uint32_t vi1 = (int) (v1 + 0.5f);
-
- y[i].qs[l/2] = (vi0 & 0x0F) | ((vi1 & 0x0F) << 4);
-
- // get the 5-th bit and store it in qh at the right position
- qh |= ((vi0 & 0x10) >> 4) << (l + 0);
- qh |= ((vi1 & 0x10) >> 4) << (l + 1);
- }
-
- memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
- }
-}
-
-static void quantize_row_q5_1(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK5_1 == 0);
-
- block_q5_1 * restrict y = vy;
-
- quantize_row_q5_1_reference(x, y, k);
-}
-
-// reference implementation for deterministic creation of model files
-static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
- assert(k % QK8_0 == 0);
- const int nb = k / QK8_0;
-
- for (int i = 0; i < nb; i++) {
- float amax = 0.0f; // absolute max
-
- for (int l = 0; l < QK8_0; l++) {
- const float v = x[i*QK8_0 + l];
- amax = MAX(amax, fabsf(v));
- }
-
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = d;
-
- for (int l = 0; l < QK8_0; ++l) {
- const float v0 = x[i*QK8_0 + l]*id;
-
- y[i].qs[l] = roundf(v0);
- }
- }
-}
-
-static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK8_0 == 0);
-
- block_q8_0 * restrict y = vy;
-
- quantize_row_q8_0_reference(x, y, k);
-}
-
-// reference implementation for deterministic creation of model files
-static void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
- assert(k % QK8_1 == 0);
- const int nb = k / QK8_1;
+// reference implementation for deterministic creation of model files
+static void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
+ assert(QK8_1 == 32);
+ assert(k % QK8_1 == 0);
+ const int nb = k / QK8_1;
for (int i = 0; i < nb; i++) {
float amax = 0.0f; // absolute max
- for (int l = 0; l < QK8_1; l++) {
- const float v = x[i*QK8_1 + l];
+ for (int j = 0; j < QK8_1; j++) {
+ const float v = x[i*QK8_1 + j];
amax = MAX(amax, fabsf(v));
}
y[i].d = d;
- int sum0 = 0;
- int sum1 = 0;
+ int sum = 0;
- for (int l = 0; l < QK8_1/2; ++l) {
- const float v0 = x[i*QK8_1 + l]*id;
- const float v1 = x[i*QK8_1 + QK8_1/2 + l]*id;
+ for (int j = 0; j < QK8_1/2; ++j) {
+ const float v0 = x[i*QK8_1 + j]*id;
+ const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;
- y[i].qs[ l] = roundf(v0);
- y[i].qs[QK8_1/2 + l] = roundf(v1);
+ y[i].qs[ j] = roundf(v0);
+ y[i].qs[QK8_1/2 + j] = roundf(v1);
- sum0 += y[i].qs[ l];
- sum1 += y[i].qs[QK8_1/2 + l];
+ sum += y[i].qs[ j];
+ sum += y[i].qs[QK8_1/2 + j];
}
- y[i].s0 = d * sum0;
- y[i].s1 = d * sum1;
+ y[i].s = d * sum;
}
}
float32x4_t asrcv[8];
float32x4_t amaxv[8];
- for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*32 + 4*l);
- for (int l = 0; l < 8; l++) asrcv[l] = vabsq_f32(srcv[l]);
+ for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
- for (int l = 0; l < 4; l++) amaxv[2*l] = vmaxq_f32(asrcv[2*l], asrcv[2*l+1]);
- for (int l = 0; l < 2; l++) amaxv[4*l] = vmaxq_f32(amaxv[4*l], amaxv[4*l+2]);
- for (int l = 0; l < 1; l++) amaxv[8*l] = vmaxq_f32(amaxv[8*l], amaxv[8*l+4]);
+ for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
const float amax = vmaxvq_f32(amaxv[0]);
y[i].d = d;
- int32x4_t accv0 = vdupq_n_s32(0);
- int32x4_t accv1 = vdupq_n_s32(0);
-
- // low half
- for (int l = 0; l < 4; l++) {
- const float32x4_t v = vmulq_n_f32(srcv[l], id);
- const int32x4_t vi = vcvtnq_s32_f32(v);
-
- y[i].qs[4*l + 0] = vgetq_lane_s32(vi, 0);
- y[i].qs[4*l + 1] = vgetq_lane_s32(vi, 1);
- y[i].qs[4*l + 2] = vgetq_lane_s32(vi, 2);
- y[i].qs[4*l + 3] = vgetq_lane_s32(vi, 3);
+ int32x4_t accv = vdupq_n_s32(0);
- accv0 = vaddq_s32(accv0, vi);
- }
-
- // high half
- for (int l = 4; l < 8; l++) {
- const float32x4_t v = vmulq_n_f32(srcv[l], id);
+ for (int j = 0; j < 8; j++) {
+ const float32x4_t v = vmulq_n_f32(srcv[j], id);
const int32x4_t vi = vcvtnq_s32_f32(v);
- y[i].qs[4*l + 0] = vgetq_lane_s32(vi, 0);
- y[i].qs[4*l + 1] = vgetq_lane_s32(vi, 1);
- y[i].qs[4*l + 2] = vgetq_lane_s32(vi, 2);
- y[i].qs[4*l + 3] = vgetq_lane_s32(vi, 3);
+ y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
+ y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
+ y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
+ y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
- accv1 = vaddq_s32(accv1, vi);
+ accv = vaddq_s32(accv, vi);
}
- const int32_t sum0 = vaddvq_s32(accv0);
- const int32_t sum1 = vaddvq_s32(accv1);
-
- y[i].s0 = d * sum0;
- y[i].s1 = d * sum1;
+ y[i].s = d * vaddvq_s32(accv);
}
#elif defined(__AVX2__) || defined(__AVX__)
for (int i = 0; i < nb; i++) {
#if defined(__AVX2__)
// Compute the sum of the quants and set y[i].s
- //y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));
- y[i].s0 = d * hsum_i32_8(_mm256_add_epi32(i0, i1));
- y[i].s1 = d * hsum_i32_8(_mm256_add_epi32(i2, i3));
+ y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));
// Convert int32 to int16
i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
// Compute the sum of the quants and set y[i].s
const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
- y[i].s0 = d * hsum_i32_4(s0);
- y[i].s1 = d * hsum_i32_4(s1);
+ y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));
// Convert int32 to int16
ni0 = _mm_packs_epi32( ni0, ni1 );
#endif
}
-static void dequantize_row_q4_0(const void * restrict vx, float * restrict y, int k) {
- assert(k % QK4_0 == 0);
- const int nb = k / QK4_0;
+static void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
+ static const int qk = QK4_0;
- const block_q4_0 * restrict x = vx;
+ assert(k % qk == 0);
-#if defined(__AVX2__)
- for (int i = 0; i < nb; i++) {
- // scale factor
- const __m256 d_v = _mm256_broadcast_ss(&x[i].d);
+ const int nb = k / qk;
- const uint8_t * restrict pp = x[i].qs;
-
- for (int l = 0; l < QK4_0; l += 32) {
- // Load 32x4-bit integers into 32x8-bit integers
- __m256i vx8 = bytes_from_nibbles_32(pp+l/2);
-
- // Subtract 8 from the integers
- vx8 = _mm256_sub_epi8(vx8, _mm256_set1_epi8(8));
-
- // Convert to 16-bit int
- const __m256i vx16_lo = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 0));
- const __m256i vx16_hi = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 1));
-
- // Convert to 32-bit int -> float 32
- const __m256 vf[4] = {
- _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 0))),
- _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 1))),
- _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 0))),
- _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 1)))
- };
-
- // Scale and store
- for (int j = 0; j < 4; j++) {
- const __m256 result = _mm256_mul_ps(vf[j], d_v);
- _mm256_storeu_ps(y + i * QK4_0 + l + j*8, result);
- }
- }
- }
-#elif defined(__ARM_NEON)
- for (int i = 0; i < nb; i++) {
- const float32x4_t vd = vdupq_n_f32(x[i].d);
-
- const uint8_t * restrict pp = x[i].qs;
-
- for (int l = 0; l < QK4_0; l += 16) {
- // Load 16x4-bit integers into 8x8-bit integers
- const uint8x8_t v8 = vld1_u8(pp + l/2);
-
- // Expand 4-bit qs to 8-bit bytes
- const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0F));
- const uint8x8_t v1 = vshr_n_u8(v8, 4);
-
- // Convert to signed 8-bit integers
- const int8x8_t vs_0 = vreinterpret_s8_u8(v0);
- const int8x8_t vs_1 = vreinterpret_s8_u8(v1);
-
- // Subtract 8 from each byte
- const int8x8_t vb_0 = vsub_s8(vs_0, vdup_n_s8(8));
- const int8x8_t vb_1 = vsub_s8(vs_1, vdup_n_s8(8));
-
- // Interleave and combine
- const int8x8_t vx_0 = vzip1_s8(vb_0, vb_1);
- const int8x8_t vx_1 = vzip2_s8(vb_0, vb_1);
-
- const int8x16_t vq = vcombine_s8(vx_0, vx_1);
-
- // convert to 2x int16x8_t
- const int16x8_t vi_0 = vmovl_s8(vget_low_s8 (vq));
- const int16x8_t vi_1 = vmovl_s8(vget_high_s8(vq));
-
- // convert to 4x float32x4_t
- const float32x4_t vf_0 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vi_0)));
- const float32x4_t vf_1 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vi_0)));
- const float32x4_t vf_2 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vi_1)));
- const float32x4_t vf_3 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vi_1)));
-
- // Multiply by d
- const float32x4_t r0 = vmulq_f32(vf_0, vd);
- const float32x4_t r1 = vmulq_f32(vf_1, vd);
- const float32x4_t r2 = vmulq_f32(vf_2, vd);
- const float32x4_t r3 = vmulq_f32(vf_3, vd);
-
- // Store
- vst1q_f32(y + i*QK4_0 + l + 0, r0);
- vst1q_f32(y + i*QK4_0 + l + 4, r1);
- vst1q_f32(y + i*QK4_0 + l + 8, r2);
- vst1q_f32(y + i*QK4_0 + l + 12, r3);
- }
- }
-#else
- // scalar
for (int i = 0; i < nb; i++) {
const float d = x[i].d;
- const uint8_t * restrict pp = x[i].qs;
-
- for (int l = 0; l < QK4_0; l += 2) {
- const uint8_t vi = pp[l/2];
-
- const int8_t vi0 = vi & 0x0F;
- const int8_t vi1 = vi >> 4;
-
- const float v0 = (vi0 - 8)*d;
- const float v1 = (vi1 - 8)*d;
+ for (int j = 0; j < qk/2; ++j) {
+ const int x0 = (x[i].qs[j] & 0x0F) - 8;
+ const int x1 = (x[i].qs[j] >> 4) - 8;
- //printf("d = %f, vi = %d, vi0 = %d, vi1 = %d, v0 = %f, v1 = %f\n", d, vi, vi0, vi1, v0, v1);
-
- y[i*QK4_0 + l + 0] = v0;
- y[i*QK4_0 + l + 1] = v1;
-
- assert(!isnan(y[i*QK4_0 + l + 0]));
- assert(!isnan(y[i*QK4_0 + l + 1]));
+ y[i*qk + j + 0 ] = x0*d;
+ y[i*qk + j + qk/2] = x1*d;
}
}
-#endif
}
-static void dequantize_row_q4_1(const void * restrict vx, float * restrict y, int k) {
- assert(k % QK4_1 == 0);
- const int nb = k / QK4_1;
-
- const block_q4_1 * restrict x = vx;
-
-#if defined(__AVX2__)
- for (int i = 0; i < nb; i++) {
- const __m256 d_v = _mm256_broadcast_ss(&x[i].d);
- const __m256 d_m = _mm256_broadcast_ss(&x[i].m);
-
- const uint8_t * restrict pp = x[i].qs;
-
- for (int l = 0; l < QK4_1; l += 32) {
- // Load 32x4-bit integers into 32x8-bit integers
- __m256i vx8 = bytes_from_nibbles_32(pp+l/2);
-
- // Convert to 16-bit int
- const __m256i vx16_lo = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 0));
- const __m256i vx16_hi = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 1));
-
- // Convert to 32-bit int -> float 32
- const __m256 vf[4] = {
- _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 0))),
- _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 1))),
- _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 0))),
- _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 1)))
- };
-
- // Scale, add m and store
- for (int j = 0; j < 4; j++) {
- const __m256 result = _mm256_add_ps(_mm256_mul_ps(vf[j], d_v), d_m);
- _mm256_storeu_ps(y + i * QK4_1 + l + j*8, result);
- }
- }
- }
-#elif defined(__ARM_NEON)
- for (int i = 0; i < nb; i++) {
- const float32x4_t vd = vdupq_n_f32(x[i].d);
- const float32x4_t vm = vdupq_n_f32(x[i].m);
-
- const uint8_t * restrict pp = x[i].qs;
-
- for (int l = 0; l < QK4_1; l += 16) {
- // Load 16x4-bit integers into 8x8-bit integers
- const uint8x8_t v8 = vld1_u8(pp + l/2);
-
- // Expand 4-bit qs to 8-bit bytes
- const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0F));
- const uint8x8_t v1 = vshr_n_u8(v8, 4);
-
- // Interleave and combine
- const uint8x8_t vx_0 = vzip1_u8(v0, v1);
- const uint8x8_t vx_1 = vzip2_u8(v0, v1);
+static void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
+ static const int qk = QK4_1;
- const uint8x16_t vq = vcombine_u8(vx_0, vx_1);
+ assert(k % qk == 0);
- // convert to 2x uint16x8_t
- const uint16x8_t vi_0 = vmovl_u8(vget_low_u8 (vq));
- const uint16x8_t vi_1 = vmovl_u8(vget_high_u8(vq));
+ const int nb = k / qk;
- // convert to 4x float32x4_t
- const float32x4_t vf_0 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_0)));
- const float32x4_t vf_1 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_0)));
- const float32x4_t vf_2 = vcvtq_f32_u32(vmovl_u16(vget_low_u16 (vi_1)));
- const float32x4_t vf_3 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(vi_1)));
-
- // multiply by d and add m
- const float32x4_t r0 = vmlaq_f32(vm, vf_0, vd);
- const float32x4_t r1 = vmlaq_f32(vm, vf_1, vd);
- const float32x4_t r2 = vmlaq_f32(vm, vf_2, vd);
- const float32x4_t r3 = vmlaq_f32(vm, vf_3, vd);
-
- // Store
- vst1q_f32(y + i*QK4_1 + l + 0, r0);
- vst1q_f32(y + i*QK4_1 + l + 4, r1);
- vst1q_f32(y + i*QK4_1 + l + 8, r2);
- vst1q_f32(y + i*QK4_1 + l + 12, r3);
- }
- }
-#else
for (int i = 0; i < nb; i++) {
const float d = x[i].d;
const float m = x[i].m;
- const uint8_t * restrict pp = x[i].qs;
-
- for (int l = 0; l < QK4_1; l += 2) {
- const uint8_t vi = pp[l/2];
-
- const int8_t vi0 = vi & 0x0F;
- const int8_t vi1 = vi >> 4;
-
- const float v0 = vi0*d + m;
- const float v1 = vi1*d + m;
+ for (int j = 0; j < qk/2; ++j) {
+ const int x0 = (x[i].qs[j] & 0x0F);
+ const int x1 = (x[i].qs[j] >> 4);
- y[i*QK4_1 + l + 0] = v0;
- y[i*QK4_1 + l + 1] = v1;
-
- assert(!isnan(y[i*QK4_1 + l + 0]));
- assert(!isnan(y[i*QK4_1 + l + 1]));
+ y[i*qk + j + 0 ] = x0*d + m;
+ y[i*qk + j + qk/2] = x1*d + m;
}
}
-#endif
}
-static void dequantize_row_q4_2(const void * restrict vx, float * restrict y, int k) {
- assert(k % QK4_2 == 0);
- const int nb = k / QK4_2;
-
- const block_q4_2 * restrict x = vx;
-
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
-
- const uint8_t * restrict pp = x[i].qs;
-
- for (int l = 0; l < QK4_2; l += 2) {
- const uint8_t vi = pp[l/2];
-
- const int8_t vi0 = vi & 0x0F;
- const int8_t vi1 = vi >> 4;
-
- const float v0 = (vi0 - 8)*d;
- const float v1 = (vi1 - 8)*d;
-
- y[i*QK4_2 + l + 0] = v0;
- y[i*QK4_2 + l + 1] = v1;
-
- assert(!isnan(y[i*QK4_2 + l + 0]));
- assert(!isnan(y[i*QK4_2 + l + 1]));
- }
- }
-}
+static void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
+ static const int qk = QK5_0;
-static void dequantize_row_q5_0(const void * restrict vx, float * restrict y, int k) {
- assert(k % QK5_0 == 0);
- const int nb = k / QK5_0;
+ assert(k % qk == 0);
- const block_q5_0 * restrict x = vx;
+ const int nb = k / qk;
for (int i = 0; i < nb; i++) {
const float d = GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * restrict pp = x[i].qs;
-
uint32_t qh;
memcpy(&qh, x[i].qh, sizeof(qh));
- for (int l = 0; l < QK5_0; l += 2) {
- const uint8_t vi = pp[l/2];
-
- // extract the 5-th bit from qh
- const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
- const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
+ for (int j = 0; j < qk/2; ++j) {
+ const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
- const int8_t vi0 = (vi & 0x0F) | vh0;
- const int8_t vi1 = (vi >> 4) | vh1;
+ const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
+ const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
- const float v0 = (vi0 - 16)*d;
- const float v1 = (vi1 - 16)*d;
-
- y[i*QK5_0 + l + 0] = v0;
- y[i*QK5_0 + l + 1] = v1;
-
- assert(!isnan(y[i*QK5_0 + l + 0]));
- assert(!isnan(y[i*QK5_0 + l + 1]));
+ y[i*qk + j + 0 ] = x0*d;
+ y[i*qk + j + qk/2] = x1*d;
}
}
}
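// [editor's note, not part of the patch] The two shift expressions above pull the
// stored fifth bits back into bit position 4:
//   xh_0 = ((qh >> (j +  0)) << 4) & 0x10   // bit j      of qh -> 0x10
//   xh_1 =  (qh >> (j + 12))       & 0x10   // bit j + 16 of qh -> 0x10
// The second form folds ">> (j + 16)" followed by "<< 4" into a single shift.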
-static void dequantize_row_q5_1(const void * restrict vx, float * restrict y, int k) {
- assert(k % QK5_1 == 0);
- const int nb = k / QK5_1;
+static void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
+ static const int qk = QK5_1;
- const block_q5_1 * restrict x = vx;
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
for (int i = 0; i < nb; i++) {
const float d = GGML_FP16_TO_FP32(x[i].d);
const float m = GGML_FP16_TO_FP32(x[i].m);
- const uint8_t * restrict pp = x[i].qs;
-
uint32_t qh;
memcpy(&qh, x[i].qh, sizeof(qh));
- for (int l = 0; l < QK5_1; l += 2) {
- const uint8_t vi = pp[l/2];
-
- // extract the 5-th bit from qh
- const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
- const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
+ for (int j = 0; j < qk/2; ++j) {
+ const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
- const uint8_t vi0 = (vi & 0x0F) | vh0;
- const uint8_t vi1 = (vi >> 4) | vh1;
+ const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
+ const int x1 = (x[i].qs[j] >> 4) | xh_1;
- const float v0 = vi0*d + m;
- const float v1 = vi1*d + m;
-
- y[i*QK5_1 + l + 0] = v0;
- y[i*QK5_1 + l + 1] = v1;
-
- assert(!isnan(y[i*QK5_1 + l + 0]));
- assert(!isnan(y[i*QK5_1 + l + 1]));
+ y[i*qk + j + 0 ] = x0*d + m;
+ y[i*qk + j + qk/2] = x1*d + m;
}
}
}
static void dequantize_row_q8_0(const void * restrict vx, float * restrict y, int k) {
- assert(k % QK8_0 == 0);
- const int nb = k / QK8_0;
+ static const int qk = QK8_0;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
const block_q8_0 * restrict x = vx;
for (int i = 0; i < nb; i++) {
const float d = x[i].d;
- const int8_t * restrict pp = x[i].qs;
-
- for (int l = 0; l < QK8_0; ++l) {
- y[i*QK8_0 + l] = pp[l]*d;
+ for (int j = 0; j < qk; ++j) {
+ y[i*qk + j] = x[i].qs[j]*d;
}
}
}
static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
-static void ggml_vec_dot_q4_2_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
static const quantize_fns_t quantize_fns[GGML_TYPE_COUNT] = {
[GGML_TYPE_Q4_0] = {
- .dequantize_row_q = dequantize_row_q4_0,
+ .dequantize_row_q = (dequantize_row_q_t) dequantize_row_q4_0,
.quantize_row_q = quantize_row_q4_0,
.quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_0_reference,
.quantize_row_q_dot = quantize_row_q8_0,
.vec_dot_type = GGML_TYPE_Q8_0,
},
[GGML_TYPE_Q4_1] = {
- .dequantize_row_q = dequantize_row_q4_1,
+ .dequantize_row_q = (dequantize_row_q_t) dequantize_row_q4_1,
.quantize_row_q = quantize_row_q4_1,
.quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_1_reference,
.quantize_row_q_dot = quantize_row_q8_1,
.vec_dot_q = ggml_vec_dot_q4_1_q8_1,
.vec_dot_type = GGML_TYPE_Q8_1,
},
- [GGML_TYPE_Q4_2] = {
- .dequantize_row_q = dequantize_row_q4_2,
- .quantize_row_q = quantize_row_q4_2,
- .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_2_reference,
- .quantize_row_q_dot = quantize_row_q8_0,
- .vec_dot_q = ggml_vec_dot_q4_2_q8_0,
- .vec_dot_type = GGML_TYPE_Q8_0,
- },
[GGML_TYPE_Q5_0] = {
- .dequantize_row_q = dequantize_row_q5_0,
+ .dequantize_row_q = (dequantize_row_q_t) dequantize_row_q5_0,
.quantize_row_q = quantize_row_q5_0,
.quantize_row_q_reference = (quantize_row_q_t) quantize_row_q5_0_reference,
.quantize_row_q_dot = quantize_row_q8_0,
.vec_dot_type = GGML_TYPE_Q8_0,
},
[GGML_TYPE_Q5_1] = {
- .dequantize_row_q = dequantize_row_q5_1,
+ .dequantize_row_q = (dequantize_row_q_t) dequantize_row_q5_1,
.quantize_row_q = quantize_row_q5_1,
.quantize_row_q_reference = (quantize_row_q_t) quantize_row_q5_1_reference,
.quantize_row_q_dot = quantize_row_q8_1,
inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
+inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; }
inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
}
static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const int nb = n / QK8_0;
+ const int qk = QK8_0;
+ const int nb = n / qk;
- assert(n % QK8_0 == 0);
+ assert(n % qk == 0);
assert(nb % 2 == 0);
const block_q4_0 * restrict x = vx;
const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
- // interleave
- const int8x16_t v0_0lz = vzip1q_s8(v0_0ls, v0_0hs);
- const int8x16_t v0_0hz = vzip2q_s8(v0_0ls, v0_0hs);
- const int8x16_t v0_1lz = vzip1q_s8(v0_1ls, v0_1hs);
- const int8x16_t v0_1hz = vzip2q_s8(v0_1ls, v0_1hs);
-
// load y
const int8x16_t v1_0l = vld1q_s8(y0->qs);
const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
#if defined(__ARM_FEATURE_DOTPROD)
// dot product into int32x4_t
- const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0lz, v1_0l), v0_0hz, v1_0h);
- const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1lz, v1_1l), v0_1hz, v1_1h);
+ const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
+ const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), x0->d*y0->d);
sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), x1->d*y1->d);
#else
- const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lz), vget_low_s8 (v1_0l));
- const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lz), vget_high_s8(v1_0l));
- const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hz), vget_low_s8 (v1_0h));
- const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hz), vget_high_s8(v1_0h));
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h));
- const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lz), vget_low_s8 (v1_1l));
- const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lz), vget_high_s8(v1_1l));
- const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hz), vget_low_s8 (v1_1h));
- const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hz), vget_high_s8(v1_1h));
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h));
const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
// Compute combined scale for the block
const __m256 d = _mm256_mul_ps( _mm256_broadcast_ss( &x[i].d ), _mm256_broadcast_ss( &y[i].d ) );
- __m128i i32[2];
- for (int j = 0; j < 2; ++j) {
- // Load 8 bytes, and unpack 4 bit fields into bytes, making 16 bytes
- __m128i bx = bytes_from_nibbles_16(x[i].qs + 8*j);
- __m128i by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16*j));
-
- // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
- const __m128i off = _mm_set1_epi8( 8 );
- bx = _mm_sub_epi8( bx, off );
+ const __m128i lowMask = _mm_set1_epi8(0xF);
+ const __m128i off = _mm_set1_epi8(8);
- // Get absolute values of x vectors
- const __m128i ax = _mm_sign_epi8(bx, bx);
+ const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
- // Sign the values of the y vectors
- const __m128i sy = _mm_sign_epi8(by, bx);
+ __m128i bx = _mm_and_si128(lowMask, tmp);
+ __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
+ bx = _mm_sub_epi8(bx, off);
+ const __m128i i32_0 = mul_sum_i8_pairs(bx, by);
- // Perform multiplication and create 16-bit values
- const __m128i dot = _mm_maddubs_epi16(ax, sy);
-
- const __m128i ones = _mm_set1_epi16(1);
- i32[j] = _mm_madd_epi16(ones, dot);
- }
+ bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
+ by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
+ bx = _mm_sub_epi8(bx, off);
+ const __m128i i32_1 = mul_sum_i8_pairs(bx, by);
// Convert int32_t to float
- __m256 p = _mm256_cvtepi32_ps( _mm256_set_m128i( i32[0], i32[1] ));
+ __m256 p = _mm256_cvtepi32_ps(_mm256_set_m128i(i32_0, i32_1));
+
// Apply the scale, and accumulate
acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
}
*s = hsum_float_8(acc);
+#elif defined(__SSSE3__)
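+ // SSSE3 fallback: process two q4_0 blocks per iteration using four 128-bit float accumulators;
+ // the first pair of blocks is handled before the loop so the accumulators can be assigned directly
+ // instead of being added to zero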
+ // set constants
+ const __m128i lowMask = _mm_set1_epi8(0xF);
+ const __m128i off = _mm_set1_epi8(8);
+
+ // Initialize the four accumulators with zeros
+ __m128 acc_0 = _mm_setzero_ps();
+ __m128 acc_1 = _mm_setzero_ps();
+ __m128 acc_2 = _mm_setzero_ps();
+ __m128 acc_3 = _mm_setzero_ps();
+
+ // First round without accumulation
+ {
+ _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for blocks 0 and 1
+ const __m128 d_0_1 = _mm_mul_ps( _mm_set1_ps( x[0].d ), _mm_set1_ps( y[0].d ) );
+
+ const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
+
+ __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
+ __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
+ bx_0 = _mm_sub_epi8(bx_0, off);
+ const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
+
+ __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
+ __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
+ bx_1 = _mm_sub_epi8(bx_1, off);
+ const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
+
+ _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for blocks 2 and 3
+ const __m128 d_2_3 = _mm_mul_ps( _mm_set1_ps( x[1].d ), _mm_set1_ps( y[1].d ) );
+
+ const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
+
+ __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
+ __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
+ bx_2 = _mm_sub_epi8(bx_2, off);
+ const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
+
+ __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
+ __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
+ bx_3 = _mm_sub_epi8(bx_3, off);
+ const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
+
+ // Convert int32_t to float
+ __m128 p0 = _mm_cvtepi32_ps(i32_0);
+ __m128 p1 = _mm_cvtepi32_ps(i32_1);
+ __m128 p2 = _mm_cvtepi32_ps(i32_2);
+ __m128 p3 = _mm_cvtepi32_ps(i32_3);
+
+ // Apply the scale
+ acc_0 = _mm_mul_ps( d_0_1, p0 );
+ acc_1 = _mm_mul_ps( d_0_1, p1 );
+ acc_2 = _mm_mul_ps( d_2_3, p2 );
+ acc_3 = _mm_mul_ps( d_2_3, p3 );
+ }
+
+ // Main loop
+ for (int i = 2; i < nb; i+=2) {
+ _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for blocks 0 and 1
+ const __m128 d_0_1 = _mm_mul_ps( _mm_set1_ps( x[i].d ), _mm_set1_ps( y[i].d ) );
+
+ const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
+
+ __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
+ __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
+ bx_0 = _mm_sub_epi8(bx_0, off);
+ const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
+
+ __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
+ __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
+ bx_1 = _mm_sub_epi8(bx_1, off);
+ const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
+
+ _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for blocks 2 and 3
+ const __m128 d_2_3 = _mm_mul_ps( _mm_set1_ps( x[i + 1].d ), _mm_set1_ps( y[i + 1].d ) );
+
+ const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
+
+ __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
+ __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
+ bx_2 = _mm_sub_epi8(bx_2, off);
+ const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
+
+ __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
+ __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
+ bx_3 = _mm_sub_epi8(bx_3, off);
+ const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
+
+ // Convert int32_t to float
+ __m128 p0 = _mm_cvtepi32_ps(i32_0);
+ __m128 p1 = _mm_cvtepi32_ps(i32_1);
+ __m128 p2 = _mm_cvtepi32_ps(i32_2);
+ __m128 p3 = _mm_cvtepi32_ps(i32_3);
+
+ // Apply the scale
+ __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
+ __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
+ __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
+ __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
+
+ // Accumulate
+ acc_0 = _mm_add_ps(p0_d, acc_0);
+ acc_1 = _mm_add_ps(p1_d, acc_1);
+ acc_2 = _mm_add_ps(p2_d, acc_2);
+ acc_3 = _mm_add_ps(p3_d, acc_3);
+ }
+
+ *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
#else
// scalar
float sumf = 0.0;
- for (int i = 0; i < nb; i++) {
- const float d0 = x[i].d;
- const float d1 = y[i].d;
-
- const uint8_t * restrict p0 = x[i].qs;
- const int8_t * restrict p1 = y[i].qs;
+ for (int i = 0; i < nb; i++) {
int sumi = 0;
- for (int j = 0; j < QK8_0/2; j++) {
- const uint8_t v0 = p0[j];
- const int i0 = (int8_t) (v0 & 0x0F) - 8;
- const int i1 = (int8_t) (v0 >> 4) - 8;
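+ // the low nibble of qs[j] pairs with y[i].qs[j], the high nibble with y[i].qs[j + qk/2]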
+ for (int j = 0; j < qk/2; ++j) {
+ const int v0 = (x[i].qs[j] & 0x0F) - 8;
+ const int v1 = (x[i].qs[j] >> 4) - 8;
- const int i2 = p1[2*j + 0];
- const int i3 = p1[2*j + 1];
-
- sumi += i0*i2 + i1*i3;
+ sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
}
- sumf += d0*d1*sumi;
+
+ sumf += (x[i].d*y[i].d)*sumi;
}
+
*s = sumf;
#endif
}
static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const int nb = n / QK8_1;
+ const int qk = QK8_1;
+ const int nb = n / qk;
- assert(n % QK8_1 == 0);
+ assert(n % qk == 0);
assert(nb % 2 == 0);
const block_q4_1 * restrict x = vx;
const block_q8_1 * restrict y = vy;
- // TODO: add AVX / WASM SIMD / etc
+ // TODO: add WASM SIMD
#if defined(__ARM_NEON)
float32x4_t sumv0 = vdupq_n_f32(0.0f);
float32x4_t sumv1 = vdupq_n_f32(0.0f);
const block_q8_1 * restrict y0 = &y[i + 0];
const block_q8_1 * restrict y1 = &y[i + 1];
- summs += x0->m * (y0->s0 + y0->s1) + x1->m * (y1->s0 + y1->s1);
+ summs += x0->m * y0->s + x1->m * y1->s;
const uint8x16_t m4b = vdupq_n_u8(0x0F);
const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
- // interleave
- const int8x16_t v0_0lz = vzip1q_s8(v0_0l, v0_0h);
- const int8x16_t v0_0hz = vzip2q_s8(v0_0l, v0_0h);
- const int8x16_t v0_1lz = vzip1q_s8(v0_1l, v0_1h);
- const int8x16_t v0_1hz = vzip2q_s8(v0_1l, v0_1h);
-
// load y
const int8x16_t v1_0l = vld1q_s8(y0->qs);
const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
#if defined(__ARM_FEATURE_DOTPROD)
// dot product into int32x4_t
- const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0lz, v1_0l), v0_0hz, v1_0h);
- const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1lz, v1_1l), v0_1hz, v1_1h);
+ const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
+ const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), x0->d*y0->d);
sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), x1->d*y1->d);
#else
- const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lz), vget_low_s8 (v1_0l));
- const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lz), vget_high_s8(v1_0l));
- const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hz), vget_low_s8 (v1_0h));
- const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hz), vget_high_s8(v1_0h));
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h));
- const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lz), vget_low_s8 (v1_1l));
- const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lz), vget_high_s8(v1_1l));
- const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hz), vget_low_s8 (v1_1h));
- const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hz), vget_high_s8(v1_1h));
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1h));
const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
const float * d0 = &x[i].d;
const float * d1 = &y[i].d;
- summs += x[i].m * (y[i].s0 + y[i].s1);
+ summs += x[i].m * y[i].s;
const __m256 d0v = _mm256_broadcast_ss( d0 );
const __m256 d1v = _mm256_broadcast_ss( d1 );
#else
// scalar
float sumf = 0.0;
- for (int i = 0; i < nb; i++) {
- const float d0 = x[i].d;
- const float m0 = x[i].m;
- const float d1 = y[i].d;
-
- const uint8_t * restrict p0 = x[i].qs;
- const int8_t * restrict p1 = y[i].qs;
-
- // TODO: this is very slow ..
- for (int j = 0; j < QK8_1/2; j++) {
- const uint8_t v0 = p0[j];
- const float f0 = d0*(v0 & 0x0F) + m0;
- const float f1 = d0*(v0 >> 4) + m0;
+ for (int i = 0; i < nb; i++) {
+ int sumi = 0;
- const float f2 = d1*p1[2*j + 0];
- const float f3 = d1*p1[2*j + 1];
+ for (int j = 0; j < qk/2; ++j) {
+ const int v0 = (x[i].qs[j] & 0x0F);
+ const int v1 = (x[i].qs[j] >> 4);
- sumf += f0*f2 + f1*f3;
+ sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
}
+
+ sumf += (x[i].d*y[i].d)*sumi + x[i].m*y[i].s;
}
+
*s = sumf;
#endif
}
-static void ggml_vec_dot_q4_2_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const int nb = n / QK8_0;
+static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
- assert(n % QK8_0 == 0);
+ assert(n % qk == 0);
assert(nb % 2 == 0);
- assert(QK8_0 == 2*QK4_2);
+ assert(qk == QK5_0);
- const block_q4_2 * restrict x = vx;
+ const block_q5_0 * restrict x = vx;
const block_q8_0 * restrict y = vy;
#if defined(__ARM_NEON)
float32x4_t sumv0 = vdupq_n_f32(0.0f);
float32x4_t sumv1 = vdupq_n_f32(0.0f);
- for (int i = 0; i < nb; i += 2) {
- const block_q4_2 * restrict x0_0 = &x[2*(i + 0) + 0];
- const block_q4_2 * restrict x0_1 = &x[2*(i + 0) + 1];
- const block_q4_2 * restrict x1_0 = &x[2*(i + 1) + 0];
- const block_q4_2 * restrict x1_1 = &x[2*(i + 1) + 1];
+ uint32_t qh0;
+ uint32_t qh1;
- const block_q8_0 * restrict y0 = &y[i + 0];
+ uint64_t tmp0[4];
+ uint64_t tmp1[4];
+
+ for (int i = 0; i < nb; i += 2) {
+ const block_q5_0 * restrict x0 = &x[i];
+ const block_q5_0 * restrict x1 = &x[i + 1];
+ const block_q8_0 * restrict y0 = &y[i];
const block_q8_0 * restrict y1 = &y[i + 1];
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
- const int8x16_t s8b = vdupq_n_s8(0x8);
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
- const uint8x16_t v0_0 = vcombine_u8(vld1_u8(x0_0->qs), vld1_u8(x0_1->qs));
- const uint8x16_t v0_1 = vcombine_u8(vld1_u8(x1_0->qs), vld1_u8(x1_1->qs));
+ // extract the 5th bit via lookup table ((!b) << 4)
+ memcpy(&qh0, x0->qh, sizeof(qh0));
+ memcpy(&qh1, x1->qh, sizeof(qh1));
- // 4-bit -> 8-bit
- const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
- const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
- const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
- const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+ tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
+ tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
+ tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
+ tmp0[3] = table_b2b_1[(qh0 >> 24) ];
- // sub 8
- const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
- const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
- const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
- const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
+ tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
+ tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
+ tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
+ tmp1[3] = table_b2b_1[(qh1 >> 24) ];
+
+ const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
+ const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
+ const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
+ const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
- // interleave
- const int8x16_t v0_0lz = vzip1q_s8(v0_0ls, v0_0hs);
- const int8x16_t v0_0hz = vzip2q_s8(v0_0ls, v0_0hs);
- const int8x16_t v0_1lz = vzip1q_s8(v0_1ls, v0_1hs);
- const int8x16_t v0_1hz = vzip2q_s8(v0_1ls, v0_1hs);
+ // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
+ const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
+ const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
+ const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
+ const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
// load y
const int8x16_t v1_0l = vld1q_s8(y0->qs);
const int8x16_t v1_1l = vld1q_s8(y1->qs);
const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
-#if defined(__ARM_FEATURE_DOTPROD)
- sumv0 = vmlaq_n_f32(sumv0, vaddq_f32(
- vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_0lz, v1_0l)), GGML_FP16_TO_FP32(x0_0->d)),
- vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_0hz, v1_0h)), GGML_FP16_TO_FP32(x0_1->d))), y0->d);
+ const float x0d = GGML_FP16_TO_FP32(x0->d);
+ const float x1d = GGML_FP16_TO_FP32(x1->d);
- sumv1 = vmlaq_n_f32(sumv1, vaddq_f32(
- vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_1lz, v1_1l)), GGML_FP16_TO_FP32(x1_0->d)),
- vmulq_n_f32(vcvtq_f32_s32(vdotq_s32(vdupq_n_s32(0), v0_1hz, v1_1h)), GGML_FP16_TO_FP32(x1_1->d))), y1->d);
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
+ vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), x0d*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
+ vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), x1d*y1->d);
#else
- const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lz), vget_low_s8 (v1_0l));
- const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lz), vget_high_s8(v1_0l));
- const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hz), vget_low_s8 (v1_0h));
- const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hz), vget_high_s8(v1_0h));
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
- const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lz), vget_low_s8 (v1_1l));
- const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lz), vget_high_s8(v1_1l));
- const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hz), vget_low_s8 (v1_1h));
- const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hz), vget_high_s8(v1_1h));
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
- sumv0 = vmlaq_n_f32(sumv0, vaddq_f32(
- vmulq_n_f32(vcvtq_f32_s32(pl0), GGML_FP16_TO_FP32(x0_0->d)),
- vmulq_n_f32(vcvtq_f32_s32(ph0), GGML_FP16_TO_FP32(x0_1->d))), y0->d);
-
- sumv1 = vmlaq_n_f32(sumv1, vaddq_f32(
- vmulq_n_f32(vcvtq_f32_s32(pl1), GGML_FP16_TO_FP32(x1_0->d)),
- vmulq_n_f32(vcvtq_f32_s32(ph1), GGML_FP16_TO_FP32(x1_1->d))), y1->d);
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0d*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), x1d*y1->d);
#endif
}
*s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+#elif defined(__wasm_simd128__)
+ v128_t sumv = wasm_f32x4_splat(0.0f);
+
+ uint32_t qh;
+ uint64_t tmp[4];
+
+ // TODO: check if unrolling this is better
+ for (int i = 0; i < nb; ++i) {
+ const block_q5_0 * restrict x0 = &x[i];
+ const block_q8_0 * restrict y0 = &y[i];
+
+ const v128_t m4b = wasm_i8x16_splat(0x0F);
+ const v128_t s16b = wasm_i8x16_splat(0x10);
+
+ // extract the 5th bit
+ memcpy(&qh, x0->qh, sizeof(qh));
+
+ tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
+ tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
+ tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
+ tmp[3] = table_b2b_1[(qh >> 24) ];
+
+ const v128_t qhl = wasm_v128_load(tmp + 0);
+ const v128_t qhh = wasm_v128_load(tmp + 2);
+
+ const v128_t v0 = wasm_v128_load(x0->qs);
+
+ // 4-bit -> 8-bit
+ const v128_t v0l = wasm_v128_and (v0, m4b);
+ const v128_t v0h = wasm_u8x16_shr(v0, 4);
+
+ // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
+ const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
+ const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);
+
+ // load y
+ const v128_t v1l = wasm_v128_load(y0->qs);
+ const v128_t v1h = wasm_v128_load(y0->qs + 16);
+
+ // int8x16 -> int16x8
+ const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
+ const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
+ const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
+ const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
+
+ const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
+ const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
+ const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
+ const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
+
+ const float x0d = GGML_FP16_TO_FP32(x0->d);
+
+ // dot product
+ sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
+ wasm_i32x4_add(
+ wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
+ wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
+ wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
+ wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), wasm_f32x4_splat(x0d*y0->d)));
+ }
+
+ *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
+ wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
#elif defined(__AVX2__)
// Initialize accumulator with zeros
__m256 acc = _mm256_setzero_ps();
// Main loop
for (int i = 0; i < nb; i++) {
/* Compute combined scale for the block */
- const __m128 d0 = _mm_set1_ps(GGML_FP16_TO_FP32(x[2*i + 0].d));
- const __m128 d1 = _mm_set1_ps(GGML_FP16_TO_FP32(x[2*i + 1].d));
- const __m256 d = _mm256_mul_ps(_mm256_set_m128(d1, d0), _mm256_broadcast_ss(&y[i].d));
-
- __m128i bx0 = bytes_from_nibbles_16(x[2*i + 0].qs);
- __m128i bx1 = bytes_from_nibbles_16(x[2*i + 1].qs);
- __m256i bx = _mm256_set_m128i(bx1, bx0);
+ const __m256 d = _mm256_mul_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d)), _mm256_broadcast_ss(&y[i].d));
- // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
- const __m256i off = _mm256_set1_epi8(8);
- bx = _mm256_sub_epi8(bx, off);
+ __m256i bx = bytes_from_nibbles_32(x[i].qs);
+ __m256i bxhi = bytes_from_bits_32(x[i].qh);
+ bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
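+ // bxhi is now 0xF0 where the 5th bit is clear and 0x00 where it is set, so OR-ing it into bx
+ // sign-extends the nibbles without the high bit to q - 16 while leaving the others at q (== (q + 16) - 16)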
+ bx = _mm256_or_si256(bx, bxhi);
__m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
#else
// scalar
float sumf = 0.0;
- for (int i = 0; i < nb; i++) {
- const uint8_t * restrict x0 = x[2*i + 0].qs;
- const uint8_t * restrict x1 = x[2*i + 1].qs;
- const int8_t * restrict y0 = y[i].qs;
-
- const float d0 = GGML_FP16_TO_FP32(x[2*i + 0].d);
- const float d1 = GGML_FP16_TO_FP32(x[2*i + 1].d);
- int sumi_0 = 0;
- int sumi_1 = 0;
-
- for (int j = 0; j < QK8_0/4; j++) {
- const uint8_t v0 = x0[j];
- const uint8_t v1 = x1[j];
-
- const int i0_0 = (int8_t) (v0 & 0x0F) - 8;
- const int i1_0 = (int8_t) (v0 >> 4) - 8;
+ for (int i = 0; i < nb; i++) {
+ uint32_t qh;
+ memcpy(&qh, x[i].qh, sizeof(qh));
- const int i0_1 = (int8_t) (v1 & 0x0F) - 8;
- const int i1_1 = (int8_t) (v1 >> 4) - 8;
+ int sumi = 0;
- const int i2_0 = y0[2*j + 0];
- const int i3_0 = y0[2*j + 1];
+ for (int j = 0; j < qk/2; ++j) {
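+ // bit j of qh is the 5th bit of the low nibble of qs[j], bit j+16 that of the high nibble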
+ const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
+ const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
- const int i2_1 = y0[2*(j + QK8_0/4) + 0];
- const int i3_1 = y0[2*(j + QK8_0/4) + 1];
+ const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
+ const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
- sumi_0 += i0_0*i2_0 + i1_0*i3_0;
- sumi_1 += i0_1*i2_1 + i1_1*i3_1;
+ sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
}
- sumf += (d0 * y[i].d) * sumi_0;
- sumf += (d1 * y[i].d) * sumi_1;
- }
- *s = sumf;
-#endif
-}
-
-static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const int nb = n / QK8_0;
-
- assert(n % QK8_0 == 0);
- assert(nb % 2 == 0);
- assert(QK8_0 == QK5_0);
-
- const block_q5_0 * restrict x = vx;
- const block_q8_0 * restrict y = vy;
-
-#if defined(__ARM_NEON)
- float32x4_t sumv = vdupq_n_f32(0.0f);
-
- uint64_t tmp[4];
-
- for (int i = 0; i < nb; ++i) {
- const block_q5_0 * restrict x0 = &x[i];
- const block_q8_0 * restrict y0 = &y[i];
-
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
- const int8x16_t s16b = vdupq_n_s8(0x10);
-
- // extract the 5th bit
- uint32_t qh;
- memcpy(&qh, x0->qh, sizeof(qh));
-
- tmp[0] = table_b2b_u[(qh >> 0) & 0xFF];
- tmp[1] = table_b2b_u[(qh >> 8) & 0xFF];
- tmp[2] = table_b2b_u[(qh >> 16) & 0xFF];
- tmp[3] = table_b2b_u[(qh >> 24) ];
-
- const int8x16_t qhl = vld1q_s8((const int8_t *)(tmp + 0));
- const int8x16_t qhh = vld1q_s8((const int8_t *)(tmp + 2));
-
- const uint8x16_t v0 = vld1q_u8(x0->qs);
-
- // 4-bit -> 8-bit
- const int8x16_t v0l = vreinterpretq_s8_u8(vandq_u8 (v0, m4b));
- const int8x16_t v0h = vreinterpretq_s8_u8(vshrq_n_u8(v0, 4));
-
- // interleave
- const int8x16_t v0lz = vzip1q_s8(v0l, v0h);
- const int8x16_t v0hz = vzip2q_s8(v0l, v0h);
-
- // add high bit and sub 16
- const int8x16_t v0lf = vsubq_s8(vorrq_s8(v0lz, qhl), s16b);
- const int8x16_t v0hf = vsubq_s8(vorrq_s8(v0hz, qhh), s16b);
-
- // load y
- const int8x16_t v1l = vld1q_s8(y0->qs);
- const int8x16_t v1h = vld1q_s8(y0->qs + 16);
-
- const float x0d = GGML_FP16_TO_FP32(x0->d);
-
-#if defined(__ARM_FEATURE_DOTPROD)
- sumv = vmlaq_n_f32(sumv, vcvtq_f32_s32(vaddq_s32(
- vdotq_s32(vdupq_n_s32(0), v0lf, v1l),
- vdotq_s32(vdupq_n_s32(0), v0hf, v1h))), x0d*y0->d);
-#else
- const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0lf), vget_low_s8 (v1l));
- const int16x8_t pl0h = vmull_s8(vget_high_s8(v0lf), vget_high_s8(v1l));
- const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0hf), vget_low_s8 (v1h));
- const int16x8_t ph0h = vmull_s8(vget_high_s8(v0hf), vget_high_s8(v1h));
-
- const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
- const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
-
- sumv = vmlaq_n_f32(sumv, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0d*y0->d);
-#endif
- }
-
- *s = vaddvq_f32(sumv);
-#elif defined(__wasm_simd128__)
- v128_t sumv = wasm_f32x4_splat(0.0f);
-
- uint64_t tmp[4];
-
- for (int i = 0; i < nb; ++i) {
- const block_q5_0 * restrict x0 = &x[i];
- const block_q8_0 * restrict y0 = &y[i];
-
- const v128_t m4b = wasm_i8x16_splat(0x0F);
- const v128_t s16b = wasm_i8x16_splat(0x10);
-
- // extract the 5th bit
- uint32_t qh;
- memcpy(&qh, x0->qh, sizeof(qh));
-
- tmp[0] = table_b2b_u[(qh >> 0) & 0xFF];
- tmp[1] = table_b2b_u[(qh >> 8) & 0xFF];
- tmp[2] = table_b2b_u[(qh >> 16) & 0xFF];
- tmp[3] = table_b2b_u[(qh >> 24) ];
-
- const v128_t qhl = wasm_v128_load(tmp + 0);
- const v128_t qhh = wasm_v128_load(tmp + 2);
-
- const v128_t v0 = wasm_v128_load(x0->qs);
-
- // 4-bit -> 8-bit
- const v128_t v0l = wasm_v128_and (v0, m4b);
- const v128_t v0h = wasm_u8x16_shr(v0, 4);
-
- // interleave
- const v128_t v0lz = wasm_v8x16_shuffle(v0l, v0h, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
- const v128_t v0hz = wasm_v8x16_shuffle(v0l, v0h, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-
- // add high bit and sub 16
- const v128_t v0lf = wasm_i8x16_sub(wasm_v128_or(v0lz, qhl), s16b);
- const v128_t v0hf = wasm_i8x16_sub(wasm_v128_or(v0hz, qhh), s16b);
-
- // load y
- const v128_t v1l = wasm_v128_load(y0->qs);
- const v128_t v1h = wasm_v128_load(y0->qs + 16);
-
- // int8x16 -> int16x8
- const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
- const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
- const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
- const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
-
- const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
- const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
- const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
- const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
-
- const float x0d = GGML_FP16_TO_FP32(x0->d);
-
- // dot product
- sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
- wasm_i32x4_add(
- wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
- wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
- wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
- wasm_i32x4_dot_i16x8(v0hfh, v1hh)))), wasm_f32x4_splat(x0d*y0->d)));
- }
-
- *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
- wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
-#elif defined(__AVX2__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
-
- // Main loop
- for (int i = 0; i < nb; i++) {
- /* Compute combined scale for the block */
- const __m256 d = _mm256_mul_ps(_mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d)), _mm256_broadcast_ss(&y[i].d));
-
- __m256i bx = bytes_from_nibbles_32(x[i].qs);
- __m256i bxhi = bytes_from_bits_32(x[i].qh);
- bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
- bx = _mm256_or_si256(bx, bxhi);
-
- __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
-
- const __m256 q = mul_sum_i8_pairs_float(bx, by);
-
- /* Multiply q with scale and accumulate */
- acc = _mm256_fmadd_ps(d, q, acc);
- }
-
- *s = hsum_float_8(acc);
-#else
- // scalar
- float sumf = 0.0;
- for (int i = 0; i < nb; i++) {
- const uint8_t * restrict x0 = x[i].qs;
- const int8_t * restrict y0 = y[i].qs;
-
- uint32_t qh;
- memcpy(&qh, x[i].qh, sizeof(qh));
-
- const float d = GGML_FP16_TO_FP32(x[i].d);
-
- int sxy = 0;
-
- for (int j = 0; j < QK8_0/2; j++) {
- const uint8_t v0 = x0[j];
-
- const int x0_0h = ((qh & (1u << (2*j + 0))) >> (2*j + 0)) << 4;
- const int x1_0h = ((qh & (1u << (2*j + 1))) >> (2*j + 1)) << 4;
-
- const int x0_0 = ((v0 & 0x0F) | x0_0h) - 16;
- const int x1_0 = ((v0 >> 4) | x1_0h) - 16;
-
- const int y0_0 = y0[2*j + 0];
- const int y1_0 = y0[2*j + 1];
-
- sxy += x0_0*y0_0 + x1_0*y1_0;
- }
-
- sumf += (d*sxy)*y[i].d;
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi;
}
+
*s = sumf;
#endif
}
static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const int nb = n / QK8_1;
+ const int qk = QK8_1;
+ const int nb = n / qk;
- assert(n % QK8_1 == 0);
+ assert(n % qk == 0);
assert(nb % 2 == 0);
- assert(QK8_1 == QK5_1);
+ assert(qk == QK5_1);
const block_q5_1 * restrict x = vx;
const block_q8_1 * restrict y = vy;
#if defined(__ARM_NEON)
- float32x4_t sumv = vdupq_n_f32(0.0f);
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
- float summs = 0.0f;
+ float summs0 = 0.0f;
+ float summs1 = 0.0f;
- uint64_t tmp[4];
+ uint32_t qh0;
+ uint32_t qh1;
- for (int i = 0; i < nb; ++i) {
+ uint64_t tmp0[4];
+ uint64_t tmp1[4];
+
+ for (int i = 0; i < nb; i += 2) {
const block_q5_1 * restrict x0 = &x[i];
+ const block_q5_1 * restrict x1 = &x[i + 1];
const block_q8_1 * restrict y0 = &y[i];
+ const block_q8_1 * restrict y1 = &y[i + 1];
- summs += GGML_FP16_TO_FP32(x0->m) * (y0->s0 + y0->s1);
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
- // extract the 5th bit
- uint32_t qh;
- memcpy(&qh, x0->qh, sizeof(qh));
+ summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
+ summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;
- tmp[0] = table_b2b_u[(qh >> 0) & 0xFF];
- tmp[1] = table_b2b_u[(qh >> 8) & 0xFF];
- tmp[2] = table_b2b_u[(qh >> 16) & 0xFF];
- tmp[3] = table_b2b_u[(qh >> 24) ];
+ // extract the 5th bit via lookup table ((b) << 4)
+ memcpy(&qh0, x0->qh, sizeof(qh0));
+ memcpy(&qh1, x1->qh, sizeof(qh1));
- const int8x16_t qhl = vld1q_s8((const int8_t *)(tmp + 0));
- const int8x16_t qhh = vld1q_s8((const int8_t *)(tmp + 2));
+ tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
+ tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
+ tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
+ tmp0[3] = table_b2b_0[(qh0 >> 24) ];
- const uint8x16_t v0 = vld1q_u8(x0->qs);
+ tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
+ tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
+ tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
+ tmp1[3] = table_b2b_0[(qh1 >> 24) ];
- // 4-bit -> 8-bit
- const int8x16_t v0l = vreinterpretq_s8_u8(vandq_u8 (v0, vdupq_n_u8(0x0F)));
- const int8x16_t v0h = vreinterpretq_s8_u8(vshrq_n_u8(v0, 4));
+ const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
+ const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
+ const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
+ const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
- // interleave
- const int8x16_t v0lz = vzip1q_s8(v0l, v0h);
- const int8x16_t v0hz = vzip2q_s8(v0l, v0h);
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
- // add
- const int8x16_t v0lf = vorrq_s8(v0lz, qhl);
- const int8x16_t v0hf = vorrq_s8(v0hz, qhh);
+ // add high bit
+ const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
+ const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
+ const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
+ const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
// load y
- const int8x16_t v1l = vld1q_s8(y0->qs);
- const int8x16_t v1h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
const float x0d = GGML_FP16_TO_FP32(x0->d);
+ const float x1d = GGML_FP16_TO_FP32(x1->d);
#if defined(__ARM_FEATURE_DOTPROD)
- sumv = vmlaq_n_f32(sumv, vcvtq_f32_s32(vaddq_s32(
- vdotq_s32(vdupq_n_s32(0), v0lf, v1l),
- vdotq_s32(vdupq_n_s32(0), v0hf, v1h))), x0d*y0->d);
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
+ vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), x0d*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
+ vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), x1d*y1->d);
#else
- const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0lf), vget_low_s8 (v1l));
- const int16x8_t pl0h = vmull_s8(vget_high_s8(v0lf), vget_high_s8(v1l));
- const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0hf), vget_low_s8 (v1h));
- const int16x8_t ph0h = vmull_s8(vget_high_s8(v0hf), vget_high_s8(v1h));
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
+
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
+ const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
+ const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
- sumv = vmlaq_n_f32(sumv, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0d*y0->d);
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0d*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), x1d*y1->d);
#endif
}
- *s = vaddvq_f32(sumv) + summs;
+ *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
#elif defined(__wasm_simd128__)
v128_t sumv = wasm_f32x4_splat(0.0f);
float summs = 0.0f;
+ uint32_t qh;
uint64_t tmp[4];
+ // TODO: check if unrolling this is better
for (int i = 0; i < nb; ++i) {
const block_q5_1 * restrict x0 = &x[i];
const block_q8_1 * restrict y0 = &y[i];
- summs += GGML_FP16_TO_FP32(x0->m) * (y0->s0 + y0->s1);
+ summs += GGML_FP16_TO_FP32(x0->m) * y0->s;
const v128_t m4b = wasm_i8x16_splat(0x0F);
// extract the 5th bit
- uint32_t qh;
memcpy(&qh, x0->qh, sizeof(qh));
- tmp[0] = table_b2b_u[(qh >> 0) & 0xFF];
- tmp[1] = table_b2b_u[(qh >> 8) & 0xFF];
- tmp[2] = table_b2b_u[(qh >> 16) & 0xFF];
- tmp[3] = table_b2b_u[(qh >> 24) ];
+ tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
+ tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
+ tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
+ tmp[3] = table_b2b_0[(qh >> 24) ];
const v128_t qhl = wasm_v128_load(tmp + 0);
const v128_t qhh = wasm_v128_load(tmp + 2);
- // interleave
- const v128_t v0lz = wasm_v8x16_shuffle(v0l, v0h, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
- const v128_t v0hz = wasm_v8x16_shuffle(v0l, v0h, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-
// add high bit
- const v128_t v0lf = wasm_v128_or(v0lz, qhl);
- const v128_t v0hf = wasm_v128_or(v0hz, qhh);
+ const v128_t v0lf = wasm_v128_or(v0l, qhl);
+ const v128_t v0hf = wasm_v128_or(v0h, qhh);
// load y
const v128_t v1l = wasm_v128_load(y0->qs);
#elif defined(__AVX2__)
// Initialize accumulator with zeros
__m256 acc = _mm256_setzero_ps();
+
float summs = 0.0f;
// Main loop
for (int i = 0; i < nb; i++) {
const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
- summs += GGML_FP16_TO_FP32(x[i].m) * (y[i].s0 + y[i].s1);
+ summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
__m256i bx = bytes_from_nibbles_32(x[i].qs);
__m256i bxhi = bytes_from_bits_32(x[i].qh);
*s = hsum_float_8(acc) + summs;
#else
+ // scalar
float sumf = 0.0;
for (int i = 0; i < nb; i++) {
- const uint8_t * restrict x0 = x[i].qs;
- const int8_t * restrict y0 = y[i].qs;
-
uint32_t qh;
memcpy(&qh, x[i].qh, sizeof(qh));
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const float m = GGML_FP16_TO_FP32(x[i].m);
-
- int sxy = 0;
-
- for (int j = 0; j < QK8_1/2; j++) {
- const uint8_t v0 = x0[j];
-
- const int x0_0h = ((qh & (1u << (2*j + 0))) >> (2*j + 0)) << 4;
- const int x1_0h = ((qh & (1u << (2*j + 1))) >> (2*j + 1)) << 4;
+ int sumi = 0;
- const int x0_0 = (v0 & 0x0F) | x0_0h;
- const int x1_0 = (v0 >> 4) | x1_0h;
+ for (int j = 0; j < qk/2; ++j) {
+ const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
- const int y0_0 = y0[2*j + 0];
- const int y1_0 = y0[2*j + 1];
+ const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
+ const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;
- sxy += x0_0*y0_0 + x1_0*y1_0;
+ sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
}
- sumf += (d*sxy)*y[i].d + m*(y[i].s0 + y[i].s1);
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
}
*s = sumf;
}
static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const int nb = n / QK8_0;
+ const int qk = QK8_0;
+ const int nb = n / qk;
- assert(n % QK8_0 == 0);
+ assert(n % qk == 0);
assert(nb % 2 == 0);
- assert(QK8_0 == QK8_0);
const block_q8_0 * restrict x = vx;
const block_q8_0 * restrict y = vy;
float sumf = 0.0;
for (int i = 0; i < nb; i++) {
- const int8_t * restrict x0 = x[i].qs;
- const int8_t * restrict y0 = y[i].qs;
-
int sumi = 0;
- for (int j = 0; j < QK8_0; j++) {
- const int v0 = x0[j];
- const int v1 = y0[j];
-
- sumi += v0*v1;
+ for (int j = 0; j < qk; j++) {
+ sumi += x[i].qs[j]*y[i].qs[j];
}
sumf += (x[i].d*y[i].d)*sumi;
inline static void ggml_vec_norm_f32 (const int n, float * s, const float * x) { ggml_vec_dot_f32(n, s, x, x); *s = sqrtf(*s); }
inline static void ggml_vec_sqr_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]*x[i]; }
inline static void ggml_vec_sqrt_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = sqrtf(x[i]); }
+inline static void ggml_vec_log_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = logf(x[i]); }
inline static void ggml_vec_abs_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fabsf(x[i]); }
inline static void ggml_vec_sgn_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : ((x[i] < 0.f) ? -1.f : 0.f); }
inline static void ggml_vec_step_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? 1.f : 0.f; }
return x/(1.0f + expf(-x));
}
-inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
- const uint16_t * i16 = (const uint16_t *) x;
- for (int i = 0; i < n; ++i) {
- y[i] = table_silu_f16[i16[i]];
- }
-}
+//inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
+// const uint16_t * i16 = (const uint16_t *) x;
+// for (int i = 0; i < n; ++i) {
+// y[i] = table_silu_f16[i16[i]];
+// }
+//}
#ifdef GGML_SILU_FP16
inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
}
#endif
+inline static float ggml_silu_backward_f32(float x, float dy) {
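+ // silu(x) = x*s with s = 1/(1 + exp(-x)), so dsilu/dx = s*(1 + x*(1 - s))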
+ const float s = 1.0f/(1.0f + expf(-x));
+ return dy*s*(1.0f + x*(1.0f - s));
+}
+
+#ifdef GGML_SILU_FP16
+inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
+ for (int i = 0; i < n; ++i) {
+ // the forward silu was computed at the f16-rounded value of x[i],
+ // so take the derivative at that same value:
+ ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
+ float usedx = GGML_FP16_TO_FP32(fp16);
+ dx[i] = ggml_silu_backward_f32(usedx, dy[i]);
+ }
+}
+#else
+inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) {
+ for (int i = 0; i < n; ++i) {
+ dx[i] = ggml_silu_backward_f32(x[i], dy[i]);
+ }
+}
+#endif
+
inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
ggml_float sum = 0.0;
[GGML_TYPE_F16] = 1,
[GGML_TYPE_Q4_0] = QK4_0,
[GGML_TYPE_Q4_1] = QK4_1,
- [GGML_TYPE_Q4_2] = QK4_2,
[GGML_TYPE_Q5_0] = QK5_0,
[GGML_TYPE_Q5_1] = QK5_1,
[GGML_TYPE_Q8_0] = QK8_0,
[GGML_TYPE_F16] = sizeof(ggml_fp16_t),
[GGML_TYPE_Q4_0] = sizeof(block_q4_0),
[GGML_TYPE_Q4_1] = sizeof(block_q4_1),
- [GGML_TYPE_Q4_2] = sizeof(block_q4_2),
[GGML_TYPE_Q5_0] = sizeof(block_q5_0),
[GGML_TYPE_Q5_1] = sizeof(block_q5_1),
[GGML_TYPE_Q8_0] = sizeof(block_q8_0),
[GGML_TYPE_F16] = "f16",
[GGML_TYPE_Q4_0] = "q4_0",
[GGML_TYPE_Q4_1] = "q4_1",
- [GGML_TYPE_Q4_2] = "q4_2",
[GGML_TYPE_Q5_0] = "q5_0",
[GGML_TYPE_Q5_1] = "q5_1",
[GGML_TYPE_Q8_0] = "q8_0",
[GGML_TYPE_F16] = false,
[GGML_TYPE_Q4_0] = true,
[GGML_TYPE_Q4_1] = true,
- [GGML_TYPE_Q4_2] = true,
[GGML_TYPE_Q5_0] = true,
[GGML_TYPE_Q5_1] = true,
[GGML_TYPE_Q8_0] = true,
"DUP",
"ADD",
+ "ADD1",
+ "ACC",
"SUB",
"MUL",
"DIV",
"SQR",
"SQRT",
+ "LOG",
"SUM",
+ "SUM_ROWS",
"MEAN",
"REPEAT",
"ABS",
"RELU",
"GELU",
"SILU",
+ "SILU_BACK",
"NORM",
"RMS_NORM",
+ "RMS_NORM_BACK",
"MUL_MAT",
"SCALE",
+ "SET",
"CPY",
"CONT",
"RESHAPE",
"PERMUTE",
"TRANSPOSE",
"GET_ROWS",
+ "GET_ROWS_BACK",
+ "DIAG",
"DIAG_MASK_INF",
+ "DIAG_MASK_ZERO",
"SOFT_MAX",
"ROPE",
+ "ROPE_BACK",
"ALIBI",
"CONV_1D_1S",
"CONV_1D_2S",
"MAP_BINARY",
};
-static_assert(GGML_OP_COUNT == 39, "GGML_OP_COUNT != 39");
+static_assert(GGML_OP_COUNT == 50, "GGML_OP_COUNT != 50");
static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
"none",
"x",
"x+y",
+ "x+y",
+ "view(x,nb,offset)+=y->x",
"x-y",
"x*y",
"x/y",
"x^2",
"√x",
+ "log(x)",
"Σx",
+ "Σx_k",
"Σx/n",
"repeat(x)",
"abs(x)",
"relu(x)",
"gelu(x)",
"silu(x)",
+ "silu_back(x)",
"norm(x)",
"rms_norm(x)",
+ "rms_norm_back(x)",
"X*Y",
"x*v",
+ "y-\\>view(x)",
"x-\\>y",
"cont(x)",
"reshape(x)",
"permute(x)",
"transpose(x)",
"get_rows(x)",
+ "get_rows_back(x)",
+ "diag(x)",
"diag_mask_inf(x)",
+ "diag_mask_zero(x)",
"soft_max(x)",
"rope(x)",
+ "rope_back(x)",
"alibi(x)",
"conv_1d_1s(x)",
"conv_1d_2s(x)",
"f(x,y)",
};
-static_assert(GGML_OP_COUNT == 39, "GGML_OP_COUNT != 39");
+static_assert(GGML_OP_COUNT == 50, "GGML_OP_COUNT != 50");
static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");
case GGML_FTYPE_MOSTLY_F16: wtype = GGML_TYPE_F16; break;
case GGML_FTYPE_MOSTLY_Q4_0: wtype = GGML_TYPE_Q4_0; break;
case GGML_FTYPE_MOSTLY_Q4_1: wtype = GGML_TYPE_Q4_1; break;
- case GGML_FTYPE_MOSTLY_Q4_2: wtype = GGML_TYPE_Q4_2; break;
case GGML_FTYPE_MOSTLY_Q5_0: wtype = GGML_TYPE_Q5_0; break;
case GGML_FTYPE_MOSTLY_Q5_1: wtype = GGML_TYPE_Q5_1; break;
case GGML_FTYPE_MOSTLY_Q8_0: wtype = GGML_TYPE_Q8_0; break;
return (n + 31) & ~31;
}
-static inline int ggml_up64(int n) {
- return (n + 63) & ~63;
-}
+//static inline int ggml_up64(int n) {
+// return (n + 63) & ~63;
+//}
static inline int ggml_up(int n, int m) {
// assert m is a power of 2
return result;
}
+// IMPORTANT:
+// when creating "opt" tensors, always save and load the scratch buffer
+// this is an error-prone process, but it is necessary to support inplace
+// operators when using scratch buffers
+// TODO: implement a better way
+void ggml_scratch_save(struct ggml_context * ctx) {
+ ctx->scratch_save = ctx->scratch;
+ ctx->scratch.data = NULL;
+}
+
+void ggml_scratch_load(struct ggml_context * ctx) {
+ ctx->scratch = ctx->scratch_save;
+}
+
////////////////////////////////////////////////////////////////////////////////
struct ggml_tensor * ggml_new_tensor_impl(
*result = (struct ggml_tensor) {
/*.type =*/ type,
+ /*.backend =*/ GGML_BACKEND_CPU,
/*.n_dims =*/ n_dims,
/*.ne =*/ { 1, 1, 1, 1 },
/*.nb =*/ { 0, 0, 0, 0 },
}
struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
- ctx->scratch_save = ctx->scratch;
- ctx->scratch.data = NULL;
+ ggml_scratch_save(ctx);
struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
- ctx->scratch = ctx->scratch_save;
+ ggml_scratch_load(ctx);
ggml_set_i32(result, value);
}
struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
- ctx->scratch_save = ctx->scratch;
- ctx->scratch.data = NULL;
+ ggml_scratch_save(ctx);
struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
- ctx->scratch = ctx->scratch_save;
+ ggml_scratch_load(ctx);
ggml_set_f32(result, value);
return ggml_add_impl(ctx, a, b, true);
}
-// ggml_sub
+// ggml_add1
-struct ggml_tensor * ggml_sub_impl(
+struct ggml_tensor * ggml_add1_impl(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
bool inplace) {
- GGML_ASSERT(ggml_are_same_shape(a, b));
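+ // b must be a scalar; it is added to every element of a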
+ GGML_ASSERT(ggml_is_scalar(b));
+ GGML_ASSERT(ggml_is_padded_1d(a));
bool is_node = false;
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- result->op = GGML_OP_SUB;
+ result->op = GGML_OP_ADD1;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
result->src1 = b;
return result;
}
-struct ggml_tensor * ggml_sub(
+struct ggml_tensor * ggml_add1(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b) {
- return ggml_sub_impl(ctx, a, b, false);
+ return ggml_add1_impl(ctx, a, b, false);
}
-struct ggml_tensor * ggml_sub_inplace(
+struct ggml_tensor * ggml_add1_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b) {
- return ggml_sub_impl(ctx, a, b, true);
+ return ggml_add1_impl(ctx, a, b, true);
}
-// ggml_mul
+// ggml_acc
-struct ggml_tensor * ggml_mul_impl(
+struct ggml_tensor * ggml_acc_impl(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
+ size_t nb1,
+ size_t nb2,
+ size_t nb3,
+ size_t offset,
bool inplace) {
- GGML_ASSERT(ggml_are_same_shape(a, b));
+ GGML_ASSERT(ggml_nelements(b) <= ggml_nelements(a));
+ GGML_ASSERT(ggml_is_contiguous(a));
+ GGML_ASSERT(a->type == GGML_TYPE_F32);
+ GGML_ASSERT(b->type == GGML_TYPE_F32);
bool is_node = false;
is_node = true;
}
- if (inplace) {
- GGML_ASSERT(is_node == false);
- }
-
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- result->op = GGML_OP_MUL;
+ ggml_scratch_save(ctx);
+
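+ // pack the strides, offset and inplace flag into a small I32 tensor stored in opt[0] so the compute pass can read them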
+ struct ggml_tensor * c = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 5);
+
+ ((int32_t *) c->data)[0] = nb1;
+ ((int32_t *) c->data)[1] = nb2;
+ ((int32_t *) c->data)[2] = nb3;
+ ((int32_t *) c->data)[3] = offset;
+ ((int32_t *) c->data)[4] = inplace ? 1 : 0;
+
+ ggml_scratch_load(ctx);
+
+ result->op = GGML_OP_ACC;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
result->src1 = b;
+ result->opt[0] = c;
return result;
}
-struct ggml_tensor * ggml_mul(
+struct ggml_tensor * ggml_acc(
struct ggml_context * ctx,
- struct ggml_tensor * a,
- struct ggml_tensor * b) {
- return ggml_mul_impl(ctx, a, b, false);
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ size_t nb1,
+ size_t nb2,
+ size_t nb3,
+ size_t offset) {
+ return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
}
-struct ggml_tensor * ggml_mul_inplace(
+struct ggml_tensor * ggml_acc_inplace(
struct ggml_context * ctx,
- struct ggml_tensor * a,
- struct ggml_tensor * b) {
- return ggml_mul_impl(ctx, a, b, true);
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ size_t nb1,
+ size_t nb2,
+ size_t nb3,
+ size_t offset) {
+ return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
}
-// ggml_div
+// ggml_sub
-struct ggml_tensor * ggml_div_impl(
+struct ggml_tensor * ggml_sub_impl(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ bool inplace) {
+ GGML_ASSERT(ggml_are_same_shape(a, b));
+
+ bool is_node = false;
+
+ if (!inplace && (a->grad || b->grad)) {
+ is_node = true;
+ }
+
+ struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
+
+ result->op = GGML_OP_SUB;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = b;
+
+ return result;
+}
+
+struct ggml_tensor * ggml_sub(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b) {
+ return ggml_sub_impl(ctx, a, b, false);
+}
+
+struct ggml_tensor * ggml_sub_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b) {
+ return ggml_sub_impl(ctx, a, b, true);
+}
+
+// ggml_mul
+
+struct ggml_tensor * ggml_mul_impl(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ bool inplace) {
+ GGML_ASSERT(ggml_are_same_shape(a, b));
+
+ bool is_node = false;
+
+ if (!inplace && (a->grad || b->grad)) {
+ is_node = true;
+ }
+
+ if (inplace) {
+ GGML_ASSERT(is_node == false);
+ }
+
+ struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
+
+ result->op = GGML_OP_MUL;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = b;
+
+ return result;
+}
+
+struct ggml_tensor * ggml_mul(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b) {
+ return ggml_mul_impl(ctx, a, b, false);
+}
+
+struct ggml_tensor * ggml_mul_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b) {
+ return ggml_mul_impl(ctx, a, b, true);
+}
+
+// ggml_div
+
+struct ggml_tensor * ggml_div_impl(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
return ggml_sqrt_impl(ctx, a, true);
}
+
+// ggml_log
+
+struct ggml_tensor * ggml_log_impl(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ bool inplace) {
+ bool is_node = false;
+
+ if (!inplace && (a->grad)) {
+ is_node = true;
+ }
+
+ struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
+
+ result->op = GGML_OP_LOG;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = NULL;
+
+ return result;
+}
+
+struct ggml_tensor * ggml_log(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_log_impl(ctx, a, false);
+}
+
+struct ggml_tensor * ggml_log_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_log_impl(ctx, a, true);
+}
+
// ggml_sum
struct ggml_tensor * ggml_sum(
return result;
}
+
+// ggml_sum_rows
+
+struct ggml_tensor * ggml_sum_rows(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ bool is_node = false;
+
+ if (a->grad) {
+ is_node = true;
+ }
+
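+ // the result collapses dimension 0 to size 1 and keeps the remaining dimensions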
+ int64_t ne[4] = {1,1,1,1};
+ for (int i=1; i<a->n_dims; ++i) {
+ ne[i] = a->ne[i];
+ }
+
+ struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, a->n_dims, ne);
+
+ result->op = GGML_OP_SUM_ROWS;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = NULL;
+
+ return result;
+}
+
// ggml_mean
struct ggml_tensor * ggml_mean(
return ggml_silu_impl(ctx, a, true);
}
+// ggml_silu_back
+
+struct ggml_tensor * ggml_silu_back(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b) {
+ bool is_node = false;
+
+ if (a->grad || b->grad) {
+ // TODO: implement backward
+ is_node = true;
+ }
+
+ struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
+
+ result->op = GGML_OP_SILU_BACK;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = b;
+
+ return result;
+}
+
// ggml_norm
struct ggml_tensor * ggml_norm_impl(
bool is_node = false;
if (!inplace && (a->grad)) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
return ggml_rms_norm_impl(ctx, a, true);
}
+struct ggml_tensor * ggml_rms_norm_back(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b) {
+ bool is_node = false;
+
+ if (a->grad) {
+ // TODO: implement backward
+ is_node = true;
+ }
+
+ struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
+
+ result->op = GGML_OP_RMS_NORM_BACK;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = b;
+
+ return result;
+}
+
+
// ggml_mul_mat
struct ggml_tensor * ggml_mul_mat(
bool is_node = false;
if (!inplace && (a->grad || b->grad)) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
- // TODO: when implement backward, fix this:
- //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- struct ggml_tensor * result = ggml_view_tensor(ctx, a);
+ struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
result->op = GGML_OP_SCALE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
return ggml_scale_impl(ctx, a, b, true);
}
+// ggml_set
+
+struct ggml_tensor * ggml_set_impl(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ size_t nb1,
+ size_t nb2,
+ size_t nb3,
+ size_t offset,
+ bool inplace) {
+ GGML_ASSERT(ggml_nelements(a) >= ggml_nelements(b));
+
+ bool is_node = false;
+
+ if (!inplace && (a->grad || b->grad)) {
+ is_node = true;
+ }
+
+ // make a view of the destination
+ struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
+
+ ggml_scratch_save(ctx);
+
+ struct ggml_tensor * c = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 5);
+
+ (( int32_t * ) c->data)[0] = nb1;
+ (( int32_t * ) c->data)[1] = nb2;
+ (( int32_t * ) c->data)[2] = nb3;
+ (( int32_t * ) c->data)[3] = offset;
+ (( int32_t * ) c->data)[4] = inplace ? 1 : 0;
+
+ ggml_scratch_load(ctx);
+
+ result->op = GGML_OP_SET;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = b;
+ result->opt[0] = c;
+
+ return result;
+}
+
+struct ggml_tensor * ggml_set(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ size_t nb1,
+ size_t nb2,
+ size_t nb3,
+ size_t offset) {
+ return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
+}
+
+struct ggml_tensor * ggml_set_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ size_t nb1,
+ size_t nb2,
+ size_t nb3,
+ size_t offset) {
+ return ggml_set_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
+}
+
+struct ggml_tensor * ggml_set_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ size_t offset) {
+ return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, false);
+}
+
+struct ggml_tensor * ggml_set_1d_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ size_t offset) {
+ return ggml_set_impl(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset, true);
+}
+
+struct ggml_tensor * ggml_set_2d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ size_t nb1,
+ size_t offset) {
+ return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
+}
+
+struct ggml_tensor * ggml_set_2d_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ size_t nb1,
+ size_t offset) {
+ return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, true);
+}
+
+
// ggml_cpy
struct ggml_tensor * ggml_cpy_impl(
bool is_node = false;
if (!inplace && (a->grad || b->grad)) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
bool is_node = false;
if (!inplace && a->grad) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
bool is_node = false;
- if (a->grad || b->grad) {
- GGML_ASSERT(false); // TODO: implement backward
+ if (a->grad) {
is_node = true;
}
+ if (b->grad) {
+ // gradient propagation is not supported
+ //GGML_ASSERT(false);
+ }
+
struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a->data);
result->op = GGML_OP_RESHAPE;
return result;
}
-struct ggml_tensor * ggml_reshape_2d(
+struct ggml_tensor * ggml_reshape_1d(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int64_t ne0,
- int64_t ne1) {
+ int64_t ne0) {
GGML_ASSERT(ggml_is_contiguous(a));
- GGML_ASSERT(ggml_nelements(a) == ne0*ne1);
+ GGML_ASSERT(ggml_nelements(a) == ne0);
bool is_node = false;
if (a->grad) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
- const int64_t ne[2] = { ne0, ne1 };
- struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data);
+ const int64_t ne[1] = { ne0 };
+ struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a->data);
result->op = GGML_OP_RESHAPE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
return result;
}
-struct ggml_tensor * ggml_reshape_3d(
+struct ggml_tensor * ggml_reshape_2d(
struct ggml_context * ctx,
struct ggml_tensor * a,
int64_t ne0,
- int64_t ne1,
- int64_t ne2) {
+ int64_t ne1) {
GGML_ASSERT(ggml_is_contiguous(a));
- GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);
+ GGML_ASSERT(ggml_nelements(a) == ne0*ne1);
bool is_node = false;
if (a->grad) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
- const int64_t ne[3] = { ne0, ne1, ne2 };
- struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data);
-
+ const int64_t ne[2] = { ne0, ne1 };
+ struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data);
+
+ result->op = GGML_OP_RESHAPE;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = NULL;
+
+ return result;
+}
+
+struct ggml_tensor * ggml_reshape_3d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0,
+ int64_t ne1,
+ int64_t ne2) {
+ GGML_ASSERT(ggml_is_contiguous(a));
+ GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);
+
+ bool is_node = false;
+
+ if (a->grad) {
+ is_node = true;
+ }
+
+ const int64_t ne[3] = { ne0, ne1, ne2 };
+ struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data);
+
+ result->op = GGML_OP_RESHAPE;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = NULL;
+
+ return result;
+}
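
The reshape variants above all alias the input data rather than copying it, which is why they require a contiguous input and a matching element count. A minimal sketch, assuming an initialized context `ctx` (the helper name is hypothetical):

// illustrative sketch, not part of the patch: 12 contiguous floats viewed
// first as 4x3 and then re-viewed as 6x2 without any copy
static struct ggml_tensor * example_reshape(struct ggml_context * ctx) {
    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3); // 12 elements
    struct ggml_tensor * b = ggml_reshape_2d(ctx, a, 6, 2);                // same 12 elements
    return b; // b->data == a->data
}
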
+
+
+struct ggml_tensor * ggml_reshape_4d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0,
+ int64_t ne1,
+ int64_t ne2,
+ int64_t ne3) {
+ GGML_ASSERT(ggml_is_contiguous(a));
+ GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2*ne3);
+
+ bool is_node = false;
+
+ if (a->grad) {
+ is_node = true;
+ }
+
+ const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
+ struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a->data);
+
result->op = GGML_OP_RESHAPE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
struct ggml_tensor * a,
int64_t ne0,
size_t offset) {
+
+ bool is_node = false;
+
if (a->grad) {
- GGML_ASSERT(false); // gradient propagation is not supported
+ is_node = true;
}
struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset);
result->op = GGML_OP_VIEW;
- result->grad = NULL;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
- result->src1 = NULL; // TODO: maybe store the offset here?
+ result->src1 = NULL;
+
+ if (is_node) {
+ memcpy(result->padding, &offset, sizeof(offset));
+ }
return result;
}
int64_t ne1,
size_t nb1,
size_t offset) {
+
+ bool is_node = false;
+
if (a->grad) {
- GGML_ASSERT(false); // gradient propagation is not supported
+ is_node = true;
}
const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 };
result->nb[3] = result->nb[2];
result->op = GGML_OP_VIEW;
- result->grad = NULL;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
- result->src1 = NULL; // TODO: maybe store the offset here?
+ result->src1 = NULL;
+
+ if (is_node) {
+ memcpy(result->padding, &offset, sizeof(offset));
+ }
return result;
}
size_t nb1,
size_t nb2,
size_t offset) {
+
+ bool is_node = false;
+
if (a->grad) {
- GGML_ASSERT(false); // gradient propagation is not supported
+ is_node = true;
}
const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, 1 };
result->nb[3] = result->nb[2]*ne2;
result->op = GGML_OP_VIEW;
- result->grad = NULL;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = NULL;
+
+ if (is_node) {
+ memcpy(result->padding, &offset, sizeof(offset));
+ }
+
+ return result;
+}
+
+// ggml_view_4d
+
+struct ggml_tensor * ggml_view_4d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0,
+ int64_t ne1,
+ int64_t ne2,
+ int64_t ne3,
+ size_t nb1,
+ size_t nb2,
+ size_t nb3,
+ size_t offset) {
+
+ bool is_node = false;
+
+ if (a->grad) {
+ is_node = true;
+ }
+
+ const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, ne3 };
+
+ struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, (char *) a->data + offset);
+
+ result->nb[1] = nb1;
+ result->nb[2] = nb2;
+ result->nb[3] = nb3;
+
+ result->op = GGML_OP_VIEW;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
- result->src1 = NULL; // TODO: maybe store the offset here?
+ result->src1 = NULL;
+
+ if (is_node) {
+ memcpy(result->padding, &offset, sizeof(offset));
+ }
return result;
}
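
The view ops take caller-supplied byte strides and a byte offset into the source data; a common pattern is extracting a single row of a matrix. A minimal sketch (the helper name is hypothetical):

// illustrative sketch, not part of the patch: row i of a 2-D tensor as a
// 1-D view; nb[1] is the row stride in bytes, so the byte offset of row i
// is i*a->nb[1]
static struct ggml_tensor * example_view_row(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,
        int                   i) {
    return ggml_view_1d(ctx, a, a->ne[0], i*a->nb[1]);
}
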
bool is_node = false;
if (a->grad) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
result->op = GGML_OP_PERMUTE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
- result->src1 = NULL; // TODO: maybe store the permutation here?
+ result->src1 = NULL;
+
+ if (is_node) {
+ result->padding[0] = axis0;
+ result->padding[1] = axis1;
+ result->padding[2] = axis2;
+ result->padding[3] = axis3;
+ }
return result;
}
bool is_node = false;
if (a->grad) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
bool is_node = false;
if (a->grad || b->grad) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
return result;
}
-// ggml_diag_mask_inf
+// ggml_get_rows_back
-struct ggml_tensor * ggml_diag_mask_inf(
+struct ggml_tensor * ggml_get_rows_back(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past) {
+ struct ggml_tensor * b,
+ struct ggml_tensor * c) {
+ GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
+ GGML_ASSERT(ggml_is_matrix(c) && (a->ne[0] == c->ne[0]));
+
bool is_node = false;
- if (a->grad) {
- GGML_ASSERT(false); // TODO: implement backward
+ if (a->grad || b->grad) {
is_node = true;
}
- // TODO: when implement backward, fix this:
- //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- struct ggml_tensor * result = ggml_view_tensor(ctx, a);
- struct ggml_tensor * b = ggml_new_i32(ctx, n_past);
- ggml_set_name(b, "n_past");
+ // TODO: implement non F32 return
+ //struct ggml_tensor * result = ggml_new_tensor_2d(ctx, a->type, a->ne[0], b->ne[0]);
+ struct ggml_tensor * result = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, c->ne[0], c->ne[1]);
- result->op = GGML_OP_DIAG_MASK_INF;
+ result->op = GGML_OP_GET_ROWS_BACK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
result->src1 = b;
+ result->opt[0] = c;
return result;
}
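
The corresponding forward kernel is not shown in this hunk, but the intended semantics are the backward of a row gather: each gradient row is scatter-added back into the row it was gathered from. A reference sketch on plain buffers, under that assumption (helper name hypothetical):

// illustrative sketch, not part of the patch: reference semantics of
// get_rows_back on row-major float buffers
static void example_get_rows_back_ref(
        float * dst, int dst_rows,            // accumulated gradient, zeroed first
        const float * grad, const int * rows, // gathered-row gradients and indices
        int n_rows, int nc) {
    memset(dst, 0, (size_t)dst_rows*nc*sizeof(float));
    for (int i = 0; i < n_rows; ++i) {
        for (int j = 0; j < nc; ++j) {
            dst[rows[i]*nc + j] += grad[i*nc + j];
        }
    }
}
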
-// ggml_soft_max
+// ggml_diag
-struct ggml_tensor * ggml_soft_max(
+struct ggml_tensor * ggml_diag(
struct ggml_context * ctx,
struct ggml_tensor * a) {
+ GGML_ASSERT(a->ne[1] == 1);
bool is_node = false;
if (a->grad) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
- // TODO: when implement backward, fix this:
- //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- struct ggml_tensor * result = ggml_view_tensor(ctx, a);
+ const int64_t ne[4] = { a->ne[0], a->ne[0], a->ne[2], a->ne[3] };
+ struct ggml_tensor * result = ggml_new_tensor(ctx, a->type, MAX(a->n_dims, 2), ne);
- result->op = GGML_OP_SOFT_MAX;
+ result->op = GGML_OP_DIAG;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
result->src1 = NULL;
return result;
}
-// ggml_rope
-struct ggml_tensor * ggml_rope(
+// ggml_diag_mask_inf
+
+struct ggml_tensor * ggml_diag_mask_inf_impl(
struct ggml_context * ctx,
struct ggml_tensor * a,
int n_past,
- int n_dims,
- int mode) {
- GGML_ASSERT(n_past >= 0);
+ bool inplace) {
bool is_node = false;
if (a->grad) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
- // TODO: when implement backward, fix this:
- //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- struct ggml_tensor * result = ggml_view_tensor(ctx, a);
+ struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
+
+ ggml_scratch_save(ctx);
+
+ struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
- struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 3);
((int32_t *) b->data)[0] = n_past;
- ((int32_t *) b->data)[1] = n_dims;
- ((int32_t *) b->data)[2] = mode;
- ggml_set_name(b, "n_past, n_dims, mode");
+ ((int32_t *) b->data)[1] = inplace ? 1 : 0;
- result->op = GGML_OP_ROPE;
+ ggml_scratch_load(ctx);
+
+ result->op = GGML_OP_DIAG_MASK_INF;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
result->src1 = b;
return result;
}
-// ggml_alibi
+struct ggml_tensor * ggml_diag_mask_inf(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int n_past) {
+ return ggml_diag_mask_inf_impl(ctx, a, n_past, false);
+}
-struct ggml_tensor * ggml_alibi(
+
+struct ggml_tensor * ggml_diag_mask_inf_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int n_past) {
+ return ggml_diag_mask_inf_impl(ctx, a, n_past, true);
+}
+
+// ggml_diag_mask_zero
+
+struct ggml_tensor * ggml_diag_mask_zero_impl(
struct ggml_context * ctx,
struct ggml_tensor * a,
int n_past,
- int n_head) {
- GGML_ASSERT(n_past >= 0);
+ bool inplace) {
bool is_node = false;
if (a->grad) {
- GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
- // TODO: when implement backward, fix this:
- //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- struct ggml_tensor * result = ggml_view_tensor(ctx, a);
+ struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
+
+ ggml_scratch_save(ctx);
struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
+ ggml_set_name(b, "n_past, inplace");
+
((int32_t *) b->data)[0] = n_past;
- ((int32_t *) b->data)[1] = n_head;
+ ((int32_t *) b->data)[1] = inplace ? 1 : 0;
- result->op = GGML_OP_ALIBI;
+ ggml_scratch_load(ctx);
+
+ result->op = GGML_OP_DIAG_MASK_ZERO;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
result->src1 = b;
return result;
}
-// ggml_conv_1d_1s
+struct ggml_tensor * ggml_diag_mask_zero(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int n_past) {
+ return ggml_diag_mask_zero_impl(ctx, a, n_past, false);
+}
-struct ggml_tensor * ggml_conv_1d_1s(
+struct ggml_tensor * ggml_diag_mask_zero_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
- struct ggml_tensor * b) {
- GGML_ASSERT(ggml_is_matrix(b));
- GGML_ASSERT(a->ne[1] == b->ne[1]);
- GGML_ASSERT(a->ne[3] == 1);
+ int n_past) {
+ return ggml_diag_mask_zero_impl(ctx, a, n_past, true);
+}
+
+// ggml_soft_max
+
+struct ggml_tensor * ggml_soft_max_impl(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ bool inplace) {
bool is_node = false;
- if (a->grad || b->grad) {
- GGML_ASSERT(false); // TODO: implement backward
+ if (a->grad) {
is_node = true;
}
- const int64_t ne[4] = { b->ne[0], a->ne[2], 1, 1, };
- struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
+ struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- result->op = GGML_OP_CONV_1D_1S;
+ result->op = GGML_OP_SOFT_MAX;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
- result->src1 = b;
+ result->src1 = NULL;
return result;
}
-// ggml_conv_1d_2s
+struct ggml_tensor * ggml_soft_max(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_soft_max_impl(ctx, a, false);
+}
-struct ggml_tensor * ggml_conv_1d_2s(
+struct ggml_tensor * ggml_soft_max_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_soft_max_impl(ctx, a, true);
+}
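
Together, the diag_mask and soft_max inplace variants cover the usual causal-attention pattern without allocating intermediate tensors. A minimal sketch, assuming `scores` holds the scaled QK^T attention scores and that the new inplace functions are declared in ggml.h (the helper name is hypothetical):

// illustrative sketch, not part of the patch: mask out future positions,
// then normalize, both in place
static struct ggml_tensor * example_masked_softmax(
        struct ggml_context * ctx,
        struct ggml_tensor  * scores,
        int                   n_past) {
    scores = ggml_diag_mask_inf_inplace(ctx, scores, n_past);
    return ggml_soft_max_inplace(ctx, scores);
}
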
+
+// ggml_rope
+
+struct ggml_tensor * ggml_rope_impl(
struct ggml_context * ctx,
struct ggml_tensor * a,
- struct ggml_tensor * b) {
- GGML_ASSERT(ggml_is_matrix(b));
- GGML_ASSERT(a->ne[1] == b->ne[1]);
- GGML_ASSERT(a->ne[3] == 1);
+ int n_past,
+ int n_dims,
+ int mode,
+ bool inplace) {
+ GGML_ASSERT(n_past >= 0);
bool is_node = false;
- if (a->grad || b->grad) {
- GGML_ASSERT(false); // TODO: implement backward
+ if (!inplace && a->grad) {
is_node = true;
}
- const int64_t ne[4] = { b->ne[0]/2, a->ne[2], 1, 1, };
- struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
+ struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- result->op = GGML_OP_CONV_1D_2S;
+ ggml_scratch_save(ctx);
+
+ struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 3);
+
+ ((int32_t *) b->data)[0] = n_past;
+ ((int32_t *) b->data)[1] = n_dims;
+ ((int32_t *) b->data)[2] = mode;
+
+ ggml_scratch_load(ctx);
+
+ result->op = GGML_OP_ROPE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src0 = a;
result->src1 = b;
return result;
}
-// ggml_flash_attn
+struct ggml_tensor * ggml_rope(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int n_past,
+ int n_dims,
+ int mode) {
+ return ggml_rope_impl(ctx, a, n_past, n_dims, mode, false);
+}
-struct ggml_tensor * ggml_flash_attn(
+struct ggml_tensor * ggml_rope_inplace(
struct ggml_context * ctx,
- struct ggml_tensor * q,
+ struct ggml_tensor * a,
+ int n_past,
+ int n_dims,
+ int mode) {
+ return ggml_rope_impl(ctx, a, n_past, n_dims, mode, true);
+}
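
A minimal sketch of how the rope variants above are typically called on a (head_dim, n_head, n_tokens) activation; the helper name is hypothetical and mode 0 is assumed here to select the default rotation variant.

// illustrative sketch, not part of the patch: rotate the first n_rot
// dimensions of each head, starting at position n_past
static struct ggml_tensor * example_rope(
        struct ggml_context * ctx,
        struct ggml_tensor  * cur,
        int                   n_past,
        int                   n_rot) {
    return ggml_rope_inplace(ctx, cur, n_past, n_rot, /*mode =*/ 0);
}
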
+
+// ggml_rope_back
+
+struct ggml_tensor * ggml_rope_back(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int n_past,
+ int n_dims,
+ int mode) {
+ GGML_ASSERT(n_past >= 0);
+ bool is_node = false;
+
+ if (a->grad) {
+ GGML_ASSERT(false); // TODO: implement backward
+ is_node = true;
+ }
+
+ struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
+
+ ggml_scratch_save(ctx);
+
+ struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 3);
+ ggml_set_name(b, "n_past, n_dims, mode");
+
+ ((int32_t *) b->data)[0] = n_past;
+ ((int32_t *) b->data)[1] = n_dims;
+ ((int32_t *) b->data)[2] = mode;
+
+ ggml_scratch_load(ctx);
+
+ result->op = GGML_OP_ROPE_BACK;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = b;
+
+ return result;
+}
+
+// ggml_alibi
+
+struct ggml_tensor * ggml_alibi(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int n_past,
+ int n_head) {
+ GGML_ASSERT(n_past >= 0);
+ bool is_node = false;
+
+ if (a->grad) {
+ GGML_ASSERT(false); // TODO: implement backward
+ is_node = true;
+ }
+
+ // TODO: when implement backward, fix this:
+ //struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
+ struct ggml_tensor * result = ggml_view_tensor(ctx, a);
+
+ ggml_scratch_save(ctx);
+
+ struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
+
+ ((int32_t *) b->data)[0] = n_past;
+ ((int32_t *) b->data)[1] = n_head;
+
+ ggml_scratch_load(ctx);
+
+ result->op = GGML_OP_ALIBI;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = b;
+
+ return result;
+}
+
+// ggml_conv_1d_1s
+
+struct ggml_tensor * ggml_conv_1d_1s(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b) {
+ GGML_ASSERT(ggml_is_matrix(b));
+ GGML_ASSERT(a->ne[1] == b->ne[1]);
+ GGML_ASSERT(a->ne[3] == 1);
+ bool is_node = false;
+
+ if (a->grad || b->grad) {
+ GGML_ASSERT(false); // TODO: implement backward
+ is_node = true;
+ }
+
+ const int64_t ne[4] = { b->ne[0], a->ne[2], 1, 1, };
+ struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
+
+ result->op = GGML_OP_CONV_1D_1S;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = b;
+
+ return result;
+}
+
+// ggml_conv_1d_2s
+
+struct ggml_tensor * ggml_conv_1d_2s(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b) {
+ GGML_ASSERT(ggml_is_matrix(b));
+ GGML_ASSERT(a->ne[1] == b->ne[1]);
+ GGML_ASSERT(a->ne[3] == 1);
+ bool is_node = false;
+
+ if (a->grad || b->grad) {
+ GGML_ASSERT(false); // TODO: implement backward
+ is_node = true;
+ }
+
+ const int64_t ne[4] = { b->ne[0]/2, a->ne[2], 1, 1, };
+ struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
+
+ result->op = GGML_OP_CONV_1D_2S;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = b;
+
+ return result;
+}
+
+// ggml_flash_attn
+
+struct ggml_tensor * ggml_flash_attn(
+ struct ggml_context * ctx,
+ struct ggml_tensor * q,
struct ggml_tensor * k,
struct ggml_tensor * v,
bool masked) {
// ggml_compute_forward_dup
+static void ggml_compute_forward_dup_same_cont(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
+ GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
+ GGML_ASSERT(src0->type == dst->type);
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const size_t nb00 = src0->nb[0];
+ const size_t nb0 = dst->nb[0];
+
+ const int ith = params->ith; // thread index
+ const int nth = params->nth; // number of threads
+
+ // parallelize by elements
+ const int ne = ggml_nelements(dst);
+ const int dr = (ne + nth - 1) / nth;
+ const int ie0 = dr * ith;
+ const int ie1 = MIN(ie0 + dr, ne);
+
+ if (ie0 < ie1) {
+ memcpy(
+ ((char *) dst->data + ie0*nb0),
+ ((char *) src0->data + ie0*nb00),
+ (ie1 - ie0) * GGML_TYPE_SIZE[src0->type]);
+ }
+
+}
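
The `(n + nth - 1)/nth` split used above (and in the row-parallel kernels below) gives every thread a contiguous chunk and clamps the last one: with n = 10 and nth = 4, dr = 3 and the ranges are [0,3), [3,6), [6,9), [9,10). A small worked sketch using the MIN macro defined earlier in this file (the helper name is hypothetical):

// illustrative sketch, not part of the patch
static void example_partition(int n, int nth, int ith, int * i0, int * i1) {
    const int dr = (n + nth - 1)/nth; // ceil(n/nth)
    *i0 = dr*ith;
    *i1 = MIN(*i0 + dr, n);
}
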
static void ggml_compute_forward_dup_f16(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const int nth = params->nth; // number of threads
if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
- // parallelize by elements
- const int ne = ggml_nelements(dst);
- const int dr = (ne + nth - 1) / nth;
- const int ie0 = dr * ith;
- const int ie1 = MIN(ie0 + dr, ne);
-
- memcpy(
- ((char *) dst->data + ie0*nb0),
- ((char *) src0->data + ie0*nb00),
- (ie1 - ie0) * GGML_TYPE_SIZE[src0->type]);
-
+ ggml_compute_forward_dup_same_cont(params, src0, dst);
return;
}
const int nth = params->nth; // number of threads
if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
- // parallelize by elements
- const int ne = ggml_nelements(dst);
- const int dr = (ne + nth - 1) / nth;
- const int ie0 = dr * ith;
- const int ie1 = MIN(ie0 + dr, ne);
-
- memcpy(
- ((char *) dst->data + ie0*nb0),
- ((char *) src0->data + ie0*nb00),
- (ie1 - ie0) * GGML_TYPE_SIZE[src0->type]);
-
+ ggml_compute_forward_dup_same_cont(params, src0, dst);
return;
}
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
struct ggml_tensor * dst) {
+ if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst) && src0->type == dst->type) {
+ ggml_compute_forward_dup_same_cont(params, src0, dst);
+ return;
+ }
switch (src0->type) {
case GGML_TYPE_F16:
{
const int ith = params->ith;
const int nth = params->nth;
- const int n = ggml_nrows(src0);
- const int nc = src0->ne[0];
+ const int nr = ggml_nrows(src0);
+ const int64_t ne0 = src0->ne[0];
+ const int64_t ne1 = src0->ne[1];
+ const int64_t ne2 = src0->ne[2];
const size_t nb00 = src0->nb[0];
const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
const size_t nb10 = src1->nb[0];
const size_t nb11 = src1->nb[1];
+ const size_t nb12 = src1->nb[2];
+ const size_t nb13 = src1->nb[3];
const size_t nb0 = dst->nb[0];
const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
GGML_ASSERT( nb0 == sizeof(float));
GGML_ASSERT(nb00 == sizeof(float));
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
if (nb10 == sizeof(float)) {
- for (int j = ith; j < n; j += nth) {
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+
#ifdef GGML_USE_ACCELERATE
vDSP_vadd(
- (float *) ((char *) src0->data + j*nb01), 1,
- (float *) ((char *) src1->data + j*nb11), 1,
- (float *) ((char *) dst->data + j*nb1), 1, nc);
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
+ (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
+ ne0);
#else
- ggml_vec_add_f32(nc,
- (float *) ((char *) dst->data + j*nb1),
- (float *) ((char *) src0->data + j*nb01),
- (float *) ((char *) src1->data + j*nb11));
+ ggml_vec_add_f32(ne0,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
+ (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
#endif
}
} else {
// src1 is not contiguous
- for (int j = ith; j < n; j += nth) {
- float * dst_ptr = (float *) ((char *) dst->data + j*nb1);
- float * src0_ptr = (float *) ((char *) src0->data + j*nb01);
- for (int i = 0; i < nc; i++) {
- float * src1_ptr = (float *) ((char *) src1->data + j*nb11 + i*nb10);
-
- dst_ptr[i] = src0_ptr[i] + *src1_ptr;
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
+ float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ for (int i0 = 0; i0 < ne0; i0++) {
+ float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
+
+ dst_ptr[i0] = src0_ptr[i0] + *src1_ptr;
}
}
}
const int ith = params->ith;
const int nth = params->nth;
- const int n = ggml_nrows(src0);
- const int nc = src0->ne[0];
+ const int nr = ggml_nrows(src0);
+ const int64_t ne0 = src0->ne[0];
+ const int64_t ne1 = src0->ne[1];
+ const int64_t ne2 = src0->ne[2];
const size_t nb00 = src0->nb[0];
const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
const size_t nb10 = src1->nb[0];
const size_t nb11 = src1->nb[1];
+ const size_t nb12 = src1->nb[2];
+ const size_t nb13 = src1->nb[3];
const size_t nb0 = dst->nb[0];
const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
GGML_ASSERT(src0->type == GGML_TYPE_F16);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
if (nb10 == sizeof(float)) {
- for (int j = ith; j < n; j += nth) {
- ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j*nb1);
- ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j*nb01);
- for (int i = 0; i < nc; i++) {
- float * src1_ptr = (float *) ((char *) src1->data + j*nb11 + i*nb10);
- dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + *src1_ptr);
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
+ ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
+
+ for (int i = 0; i < ne0; i++) {
+ dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
}
}
}
const int ith = params->ith;
const int nth = params->nth;
- const int n = ggml_nrows(src0);
- const int nc = src0->ne[0];
+ const int nr = ggml_nrows(src0);
+ const int64_t ne0 = src0->ne[0];
+ const int64_t ne1 = src0->ne[1];
+ const int64_t ne2 = src0->ne[2];
const size_t nb00 = src0->nb[0];
const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
const size_t nb10 = src1->nb[0];
const size_t nb11 = src1->nb[1];
+ const size_t nb12 = src1->nb[2];
+ const size_t nb13 = src1->nb[3];
const size_t nb0 = dst->nb[0];
const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
GGML_ASSERT(src0->type == GGML_TYPE_F16);
GGML_ASSERT(src1->type == GGML_TYPE_F16);
- GGML_ASSERT(dst->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F16);
GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
if (nb10 == sizeof(ggml_fp16_t)) {
- for (int j = ith; j < n; j += nth) {
- ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j*nb1);
- ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j*nb01);
- for (int i = 0; i < nc; i++) {
- ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + j*nb11 + i*nb10);
- dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(*src1_ptr));
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
+ ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ ggml_fp16_t * src1_ptr = (ggml_fp16_t *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
+
+ for (int i = 0; i < ne0; i++) {
+ dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + GGML_FP16_TO_FP32(src1_ptr[i]));
}
}
}
return;
}
+ const int nr = ggml_nrows(src0);
const int64_t ne00 = src0->ne[0];
const int64_t ne01 = src0->ne[1];
const int64_t ne02 = src0->ne[2];
- const int64_t ne03 = src0->ne[3];
-
- //const int64_t ne10 = src1->ne[0];
- //const int64_t ne11 = src1->ne[1];
- const int64_t ne12 = src1->ne[2];
- const int64_t ne13 = src1->ne[3];
-
- //const int64_t ne0 = dst->ne[0];
- //const int64_t ne1 = dst->ne[1];
- const int64_t ne2 = dst->ne[2];
- const int64_t ne3 = dst->ne[3];
+ //const int64_t ne03 = src0->ne[3];
- const int nb00 = src0->nb[0];
- const int nb01 = src0->nb[1];
- const int nb02 = src0->nb[2];
- const int nb03 = src0->nb[3];
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
- const int nb10 = src1->nb[0];
- const int nb11 = src1->nb[1];
- const int nb12 = src1->nb[2];
- const int nb13 = src1->nb[3];
+ const size_t nb10 = src1->nb[0];
+ const size_t nb11 = src1->nb[1];
+ const size_t nb12 = src1->nb[2];
+ const size_t nb13 = src1->nb[3];
- const int nb0 = dst->nb[0];
- const int nb1 = dst->nb[1];
- const int nb2 = dst->nb[2];
- const int nb3 = dst->nb[3];
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
const int ith = params->ith;
const int nth = params->nth;
- GGML_ASSERT(ne02 == ne12);
- GGML_ASSERT(ne03 == ne13);
- GGML_ASSERT(ne2 == ne12);
- GGML_ASSERT(ne3 == ne13);
-
const enum ggml_type type = src0->type;
dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;
quantize_row_q_t const quantize_row_q = quantize_fns[type].quantize_row_q;
// we don't support permuted src0 or src1
- GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[type]);
+ GGML_ASSERT(nb00 == GGML_TYPE_SIZE[type]);
GGML_ASSERT(nb10 == sizeof(float));
// dst cannot be transposed or permuted
GGML_ASSERT(dst->type == src0->type);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
- // total rows in src0
- const int nr = ne01*ne02*ne03;
-
// rows per thread
const int dr = (nr + nth - 1)/nth;
} break;
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
- case GGML_TYPE_Q4_2:
case GGML_TYPE_Q5_0:
case GGML_TYPE_Q5_1:
case GGML_TYPE_Q8_0:
}
}
-// ggml_compute_forward_sub
+// ggml_compute_forward_add1
+
+static void ggml_compute_forward_add1_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(ggml_is_scalar(src1));
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(src0);
+ const int64_t ne0 = src0->ne[0];
+ const int64_t ne1 = src0->ne[1];
+ const int64_t ne2 = src0->ne[2];
+
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ GGML_ASSERT( nb0 == sizeof(float));
+ GGML_ASSERT(nb00 == sizeof(float));
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+#ifdef GGML_USE_ACCELERATE
+ UNUSED(ggml_vec_add1_f32);
+
+ vDSP_vadd(
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
+ (float *) ((char *) src1->data), 0,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
+ ne0);
+#else
+ ggml_vec_add1_f32(ne0,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
+ *(float *) src1->data);
+#endif
+ }
+}
+
+static void ggml_compute_forward_add1_f16_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(ggml_is_scalar(src1));
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // scalar to add
+ const float v = *(float *) src1->data;
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(src0);
+ const int64_t ne0 = src0->ne[0];
+ const int64_t ne1 = src0->ne[1];
+ const int64_t ne2 = src0->ne[2];
+
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F16);
+
+ GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
+ ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ for (int i = 0; i < ne0; i++) {
+ dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
+ }
+ }
+}
+
+static void ggml_compute_forward_add1_f16_f16(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(ggml_is_scalar(src1));
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // scalar to add
+ const float v = GGML_FP16_TO_FP32(*(ggml_fp16_t *) src1->data);
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(src0);
+ const int64_t ne0 = src0->ne[0];
+ const int64_t ne1 = src0->ne[1];
+ const int64_t ne2 = src0->ne[2];
+
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F16);
+
+ GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
+ ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ for (int i = 0; i < ne0; i++) {
+ dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + v);
+ }
+ }
+}
+
+static void ggml_compute_forward_add1_q_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(ggml_is_scalar(src1));
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // scalar to add
+ const float v = *(float *) src1->data;
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(src0);
+ const int64_t ne0 = src0->ne[0];
+ const int64_t ne1 = src0->ne[1];
+ const int64_t ne2 = src0->ne[2];
+
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ const enum ggml_type type = src0->type;
+ dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;
+ quantize_row_q_t const quantize_row_q = quantize_fns[type].quantize_row_q;
+
+ // we don't support permuted src0
+ GGML_ASSERT(nb00 == GGML_TYPE_SIZE[type]);
+
+ // dst cannot be transposed or permuted
+ GGML_ASSERT(nb0 <= nb1);
+ GGML_ASSERT(nb1 <= nb2);
+ GGML_ASSERT(nb2 <= nb3);
+
+ GGML_ASSERT(ggml_is_quantized(src0->type));
+ GGML_ASSERT(dst->type == src0->type);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
+
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ void * src0_row = (void *) ((char *) src0->data + (i1*nb01 + i2*nb02 + i3*nb03));
+ void * dst_row = (void *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3 ));
+
+ assert(ne0 % 32 == 0);
+
+ // unquantize row from src0 to temp buffer
+ dequantize_row_q(src0_row, wdata, ne0);
+ // add src1
+ ggml_vec_acc1_f32(ne0, wdata, v);
+ // quantize row to dst
+ quantize_row_q(wdata, dst_row, ne0);
+ }
+}
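
The quantized path above dequantizes each row into a per-thread scratch row inside params->wdata, padded by CACHE_LINE_SIZE_F32 floats so neighbouring threads do not write into the same cache line. From the addressing used above, the scratch buffer has to span roughly the size computed below; this is a derived sketch, not the planner's actual work-size computation, which lies outside this hunk.

// illustrative sketch, not part of the patch
static size_t example_add1_q_scratch_bytes(int64_t ne0, int nth) {
    return (size_t)nth*(ne0 + CACHE_LINE_SIZE_F32)*sizeof(float);
}
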
+
+static void ggml_compute_forward_add1(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_add1_f32(params, src0, src1, dst);
+ } break;
+ case GGML_TYPE_F16:
+ {
+ if (src1->type == GGML_TYPE_F16) {
+ ggml_compute_forward_add1_f16_f16(params, src0, src1, dst);
+ }
+ else if (src1->type == GGML_TYPE_F32) {
+ ggml_compute_forward_add1_f16_f32(params, src0, src1, dst);
+ }
+ else {
+ GGML_ASSERT(false);
+ }
+ } break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q8_1:
+ {
+ ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
+
+// ggml_compute_forward_acc
+
+static void ggml_compute_forward_acc_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ const struct ggml_tensor * opt0,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
+
+ GGML_ASSERT(opt0->type == GGML_TYPE_I32);
+ GGML_ASSERT(ggml_nelements(opt0) == 5);
+
+ // view src0 and dst with these strides and data offset in bytes during acc
+ // nb0 is implicitly element_size because src0 and dst are contiguous
+ size_t nb1 = ((int32_t *) opt0->data)[0];
+ size_t nb2 = ((int32_t *) opt0->data)[1];
+ size_t nb3 = ((int32_t *) opt0->data)[2];
+ size_t offset = ((int32_t *) opt0->data)[3];
+ bool inplace = (bool) ((int32_t *) opt0->data)[4];
+
+ if (!inplace && (params->type == GGML_TASK_INIT)) {
+ // memcpy needs to be synchronized across threads to avoid race conditions.
+ // => do it in INIT phase
+ memcpy(
+ ((char *) dst->data),
+ ((char *) src0->data),
+ ggml_nbytes(dst));
+ }
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(src1);
+ const int nc = src1->ne[0];
+
+ const int64_t ne10 = src1->ne[0];
+ const int64_t ne11 = src1->ne[1];
+ const int64_t ne12 = src1->ne[2];
+ const int64_t ne13 = src1->ne[3];
+
+ const size_t nb10 = src1->nb[0];
+ const size_t nb11 = src1->nb[1];
+ const size_t nb12 = src1->nb[2];
+ const size_t nb13 = src1->nb[3];
+
+ // src0 and dst as viewed during acc
+ const size_t nb0 = ggml_element_size(src0);
+
+ const size_t nb00 = nb0;
+ const size_t nb01 = nb1;
+ const size_t nb02 = nb2;
+ const size_t nb03 = nb3;
+
+ GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb0 + (ne11 == 0 ? 0 : ne11-1)*nb1 + (ne12 == 0 ? 0 : ne12-1)*nb2 + (ne13 == 0 ? 0 : ne13-1)*nb3 < ggml_nbytes(dst));
+ GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10-1)*nb00 + (ne11 == 0 ? 0 : ne11-1)*nb01 + (ne12 == 0 ? 0 : ne12-1)*nb02 + (ne13 == 0 ? 0 : ne13-1)*nb03 < ggml_nbytes(src0));
+
+ GGML_ASSERT(nb10 == sizeof(float));
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 and dst are viewed with shape of src1 and offset
+ // => same indices
+ const int i3 = ir/(ne12*ne11);
+ const int i2 = (ir - i3*ne12*ne11)/ne11;
+ const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
+
+#ifdef GGML_USE_ACCELERATE
+ vDSP_vadd(
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset), 1,
+ (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset), 1, nc);
+#else
+ ggml_vec_add_f32(nc,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + offset),
+ (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
+#endif
+ }
+}
+
+static void ggml_compute_forward_acc(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ const struct ggml_tensor * opt0,
+ struct ggml_tensor * dst) {
+
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_acc_f32(params, src0, src1, opt0, dst);
+ } break;
+ case GGML_TYPE_F16:
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q8_1:
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
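
In the non-inplace case, acc first copies src0 into dst during the INIT phase and then adds src1 into the window described by the strides and byte offset packed in opt0. A reference sketch of the 1-D case on plain buffers (helper name hypothetical):

// illustrative sketch, not part of the patch: copy, then accumulate into a
// byte-offset window
static void example_acc_ref_1d(
        float * dst, const float * src0, int n,
        const float * src1, int n1, size_t offset_bytes) {
    memcpy(dst, src0, (size_t)n*sizeof(float));
    float * win = (float *) ((char *) dst + offset_bytes);
    for (int i = 0; i < n1; ++i) {
        win[i] += src1[i];
    }
}
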
+
+// ggml_compute_forward_sub
static void ggml_compute_forward_sub_f32(
const struct ggml_compute_params * params,
return;
}
- const int n = ggml_nrows(src0);
- const int nc = src0->ne[0];
+ const int nr = ggml_nrows(src0);
+ const int64_t ne0 = src0->ne[0];
+ const int64_t ne1 = src0->ne[1];
+ const int64_t ne2 = src0->ne[2];
- assert( dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
- assert(src1->nb[0] == sizeof(float));
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
- for (int i = 0; i < n; i++) {
- ggml_vec_sub_f32(nc,
- (float *) ((char *) dst->data + i*( dst->nb[1])),
- (float *) ((char *) src0->data + i*(src0->nb[1])),
- (float *) ((char *) src1->data + i*(src1->nb[1])));
+ const size_t nb10 = src1->nb[0];
+ const size_t nb11 = src1->nb[1];
+ const size_t nb12 = src1->nb[2];
+ const size_t nb13 = src1->nb[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ GGML_ASSERT( nb0 == sizeof(float));
+ GGML_ASSERT(nb00 == sizeof(float));
+
+ if (nb10 == sizeof(float)) {
+ for (int ir = 0; ir < nr; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+
+#ifdef GGML_USE_ACCELERATE
+ vDSP_vsub(
+ (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
+ ne0);
+#else
+ ggml_vec_sub_f32(ne0,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
+ (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
+#endif
+ }
+ } else {
+ // src1 is not contiguous
+ for (int ir = 0; ir < nr; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
+ float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ for (int i0 = 0; i0 < ne0; i0++) {
+ float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
+
+ dst_ptr[i0] = src0_ptr[i0] - *src1_ptr;
+ }
+ }
}
}
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
+ assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(src0);
+ const int64_t ne0 = src0->ne[0];
+ const int64_t ne1 = src0->ne[1];
+ const int64_t ne2 = src0->ne[2];
+
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const size_t nb10 = src1->nb[0];
+ const size_t nb11 = src1->nb[1];
+ const size_t nb12 = src1->nb[2];
+ const size_t nb13 = src1->nb[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ GGML_ASSERT( nb0 == sizeof(float));
+ GGML_ASSERT(nb00 == sizeof(float));
+
+ if (nb10 == sizeof(float)) {
+ for (int ir = ith; ir < nr; ir += nth) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+
+#ifdef GGML_USE_ACCELERATE
+ UNUSED(ggml_vec_mul_f32);
+
+ vDSP_vmul(
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
+ (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
+ ne0);
+#else
+ ggml_vec_mul_f32(ne0,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
+ (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
+#endif
+ }
+ } else {
+ // src1 is not contiguous
+ for (int ir = ith; ir < nr; ir += nth) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
+ float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ for (int i0 = 0; i0 < ne0; i0++) {
+ float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
+
+ dst_ptr[i0] = src0_ptr[i0] * (*src1_ptr);
+ }
+ }
+ }
+}
+
+static void ggml_compute_forward_mul(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_mul_f32(params, src0, src1, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
+// ggml_compute_forward_div
+
+static void ggml_compute_forward_div_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
return;
}
- const int n = ggml_nrows(src0);
- const int nc = src0->ne[0];
+ const int nr = ggml_nrows(src0);
+ const int64_t ne0 = src0->ne[0];
+ const int64_t ne1 = src0->ne[1];
+ const int64_t ne2 = src0->ne[2];
- assert( dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
- assert(src1->nb[0] == sizeof(float));
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
- for (int i = 0; i < n; i++) {
- ggml_vec_mul_f32(nc,
- (float *) ((char *) dst->data + i*( dst->nb[1])),
- (float *) ((char *) src0->data + i*(src0->nb[1])),
- (float *) ((char *) src1->data + i*(src1->nb[1])));
+ const size_t nb10 = src1->nb[0];
+ const size_t nb11 = src1->nb[1];
+ const size_t nb12 = src1->nb[2];
+ const size_t nb13 = src1->nb[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ GGML_ASSERT( nb0 == sizeof(float));
+ GGML_ASSERT(nb00 == sizeof(float));
+
+ if (nb10 == sizeof(float)) {
+ for (int ir = 0; ir < nr; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+
+#ifdef GGML_USE_ACCELERATE
+ vDSP_vdiv(
+ (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01), 1,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ), 1,
+ ne0);
+#else
+ ggml_vec_div_f32(ne0,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 ),
+ (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01),
+ (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
+#endif
+ }
+ } else {
+ // src1 is not contiguous
+ for (int ir = 0; ir < nr; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 );
+ float * src0_ptr = (float *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ for (int i0 = 0; i0 < ne0; i0++) {
+ float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11 + i0*nb10);
+
+ dst_ptr[i0] = src0_ptr[i0] / (*src1_ptr);
+ }
+ }
}
}
-static void ggml_compute_forward_mul(
+static void ggml_compute_forward_div(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
switch (src0->type) {
case GGML_TYPE_F32:
{
- ggml_compute_forward_mul_f32(params, src0, src1, dst);
+ ggml_compute_forward_div_f32(params, src0, src1, dst);
} break;
default:
{
}
}
-// ggml_compute_forward_div
+// ggml_compute_forward_sqr
-static void ggml_compute_forward_div_f32(
+static void ggml_compute_forward_sqr_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
- const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
assert(params->ith == 0);
- assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
+ assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
}
- const int n = ggml_nrows(src0);
- const int nc = src0->ne[0];
+ const int n = ggml_nrows(src0);
+ const int nc = src0->ne[0];
assert( dst->nb[0] == sizeof(float));
assert(src0->nb[0] == sizeof(float));
- assert(src1->nb[0] == sizeof(float));
for (int i = 0; i < n; i++) {
- ggml_vec_div_f32(nc,
+ ggml_vec_sqr_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
- (float *) ((char *) src0->data + i*(src0->nb[1])),
- (float *) ((char *) src1->data + i*(src1->nb[1])));
+ (float *) ((char *) src0->data + i*(src0->nb[1])));
}
}
-static void ggml_compute_forward_div(
+static void ggml_compute_forward_sqr(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
- const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
switch (src0->type) {
case GGML_TYPE_F32:
{
- ggml_compute_forward_div_f32(params, src0, src1, dst);
+ ggml_compute_forward_sqr_f32(params, src0, dst);
} break;
default:
{
}
}
-// ggml_compute_forward_sqr
+// ggml_compute_forward_sqrt
-static void ggml_compute_forward_sqr_f32(
+static void ggml_compute_forward_sqrt_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
struct ggml_tensor * dst) {
return;
}
- const int n = ggml_nrows(src0);
- const int nc = src0->ne[0];
+ const int n = ggml_nrows(src0);
+ const int nc = src0->ne[0];
assert( dst->nb[0] == sizeof(float));
assert(src0->nb[0] == sizeof(float));
for (int i = 0; i < n; i++) {
- ggml_vec_sqr_f32(nc,
+ ggml_vec_sqrt_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
(float *) ((char *) src0->data + i*(src0->nb[1])));
}
}
-static void ggml_compute_forward_sqr(
+static void ggml_compute_forward_sqrt(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
struct ggml_tensor * dst) {
switch (src0->type) {
case GGML_TYPE_F32:
{
- ggml_compute_forward_sqr_f32(params, src0, dst);
+ ggml_compute_forward_sqrt_f32(params, src0, dst);
} break;
default:
{
}
}
-// ggml_compute_forward_sqrt
-static void ggml_compute_forward_sqrt_f32(
+// ggml_compute_forward_log
+
+static void ggml_compute_forward_log_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
struct ggml_tensor * dst) {
- assert(params->ith == 0);
- assert(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(params->ith == 0);
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert( dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
+ GGML_ASSERT( dst->nb[0] == sizeof(float));
+ GGML_ASSERT(src0->nb[0] == sizeof(float));
for (int i = 0; i < n; i++) {
- ggml_vec_sqrt_f32(nc,
+ ggml_vec_log_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
(float *) ((char *) src0->data + i*(src0->nb[1])));
}
}
-static void ggml_compute_forward_sqrt(
+static void ggml_compute_forward_log(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
struct ggml_tensor * dst) {
switch (src0->type) {
case GGML_TYPE_F32:
{
- ggml_compute_forward_sqrt_f32(params, src0, dst);
+ ggml_compute_forward_log_f32(params, src0, dst);
} break;
default:
{
}
}
+// ggml_compute_forward_sum_rows
+
+static void ggml_compute_forward_sum_rows_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(params->ith == 0);
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ GGML_ASSERT(src0->nb[0] == sizeof(float));
+ GGML_ASSERT(dst->nb[0] == sizeof(float));
+
+ const int64_t ne00 = src0->ne[0];
+ const int64_t ne01 = src0->ne[1];
+ const int64_t ne02 = src0->ne[2];
+ const int64_t ne03 = src0->ne[3];
+
+ const int64_t ne0 = dst->ne[0];
+ const int64_t ne1 = dst->ne[1];
+ const int64_t ne2 = dst->ne[2];
+ const int64_t ne3 = dst->ne[3];
+
+ GGML_ASSERT(ne0 == 1);
+ GGML_ASSERT(ne1 == ne01);
+ GGML_ASSERT(ne2 == ne02);
+ GGML_ASSERT(ne3 == ne03);
+
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ for (int64_t i3 = 0; i3 < ne03; i3++) {
+ for (int64_t i2 = 0; i2 < ne02; i2++) {
+ for (int64_t i1 = 0; i1 < ne01; i1++) {
+ float* src_row = (float *) ((char *) src0->data + i1*nb01 + i2*nb02 + i3*nb03);
+ float* dst_row = (float *) ((char *) dst->data + i1*nb1 + i2*nb2 + i3*nb3);
+ float row_sum = 0;
+ ggml_vec_sum_f32(ne00, &row_sum, src_row);
+ dst_row[0] = row_sum;
+ }
+ }
+ }
+}
+
+static void ggml_compute_forward_sum_rows(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_sum_rows_f32(params, src0, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
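
sum_rows reduces along dimension 0, so the output has ne0 == 1 and otherwise the same shape as the input. A reference sketch of the 2-D case on a plain row-major buffer (helper name hypothetical):

// illustrative sketch, not part of the patch
static void example_sum_rows_ref(const float * src, float * dst, int nr, int nc) {
    for (int r = 0; r < nr; ++r) {
        float s = 0.0f;
        for (int c = 0; c < nc; ++c) {
            s += src[r*nc + c];
        }
        dst[r] = s; // one value per input row
    }
}
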
+
// ggml_compute_forward_mean
static void ggml_compute_forward_mean_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
struct ggml_tensor * dst) {
- assert(params->ith == 0);
- assert(ggml_can_repeat(src0, dst));
+ GGML_ASSERT(params->ith == 0);
+ GGML_ASSERT(ggml_can_repeat(src0, dst));
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
}
- // TODO: implement support for rank > 2 tensors
- assert(src0->ne[2] == 1);
- assert(src0->ne[3] == 1);
- assert( dst->ne[2] == 1);
- assert( dst->ne[3] == 1);
+ const int64_t ne0 = dst->ne[0];
+ const int64_t ne1 = dst->ne[1];
+ const int64_t ne2 = dst->ne[2];
+ const int64_t ne3 = dst->ne[3];
+
+ const int64_t ne00 = src0->ne[0];
+ const int64_t ne01 = src0->ne[1];
+ const int64_t ne02 = src0->ne[2];
+ const int64_t ne03 = src0->ne[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
- const int nc = dst->ne[0];
- const int nr = dst->ne[1];
- const int nc0 = src0->ne[0];
- const int nr0 = src0->ne[1];
- const int ncr = nc/nc0; // guaranteed to be an integer due to the check in ggml_can_repeat
- const int nrr = nr/nr0; // guaranteed to be an integer due to the check in ggml_can_repeat
+ // guaranteed to be an integer due to the check in ggml_can_repeat
+ const int nr0 = (int)(ne0/ne00);
+ const int nr1 = (int)(ne1/ne01);
+ const int nr2 = (int)(ne2/ne02);
+ const int nr3 = (int)(ne3/ne03);
// TODO: support for transposed / permuted tensors
- assert( dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
+ GGML_ASSERT(nb0 == sizeof(float));
+ GGML_ASSERT(nb00 == sizeof(float));
// TODO: maybe this is not optimal?
- for (int i = 0; i < nrr; i++) {
- for (int j = 0; j < ncr; j++) {
- for (int k = 0; k < nr0; k++) {
- ggml_vec_cpy_f32(nc0,
- (float *) ((char *) dst->data + (i*nr0 + k)*( dst->nb[1]) + j*nc0*( dst->nb[0])),
- (float *) ((char *) src0->data + ( k)*(src0->nb[1])));
+ for (int i3 = 0; i3 < nr3; i3++) {
+ for (int k3 = 0; k3 < ne03; k3++) {
+ for (int i2 = 0; i2 < nr2; i2++) {
+ for (int k2 = 0; k2 < ne02; k2++) {
+ for (int i1 = 0; i1 < nr1; i1++) {
+ for (int k1 = 0; k1 < ne01; k1++) {
+ for (int i0 = 0; i0 < nr0; i0++) {
+ ggml_vec_cpy_f32(ne00,
+ (float *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0),
+ (float *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01));
+ }
+ }
+ }
+ }
}
}
}
}
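
The generalized repeat above tiles src0 nr0 x nr1 x nr2 x nr3 times across dst, which is how a small tensor (for example a per-column bias) is broadcast before an elementwise op. A reference sketch of the 2-D case on plain buffers, using the same output indexing as the kernel (helper name hypothetical):

// illustrative sketch, not part of the patch: tile a ne00 x ne01 block into a
// (nr0*ne00) x (nr1*ne01) output
static void example_repeat_ref_2d(
        const float * src, float * dst,
        int ne00, int ne01, int nr0, int nr1) {
    for (int i1 = 0; i1 < nr1; ++i1) {
        for (int k1 = 0; k1 < ne01; ++k1) {
            for (int i0 = 0; i0 < nr0; ++i0) {
                memcpy(dst + ((size_t)(i1*ne01 + k1)*(nr0*ne00) + i0*ne00),
                       src + (size_t)k1*ne00,
                       (size_t)ne00*sizeof(float));
            }
        }
    }
}
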
+// ggml_compute_forward_silu_back
+
+static void ggml_compute_forward_silu_back_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * grad,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_is_contiguous(grad));
+ GGML_ASSERT(ggml_is_contiguous(src0));
+ GGML_ASSERT(ggml_is_contiguous(dst));
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(ggml_are_same_shape(src0, grad));
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nc = src0->ne[0];
+ const int nr = ggml_nrows(src0);
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ for (int i1 = ir0; i1 < ir1; i1++) {
+ ggml_vec_silu_backward_f32(nc,
+ (float *) ((char *) dst->data + i1*( dst->nb[1])),
+ (float *) ((char *) src0->data + i1*(src0->nb[1])),
+ (float *) ((char *) grad->data + i1*(grad->nb[1])));
+
+#ifndef NDEBUG
+ for (int k = 0; k < nc; k++) {
+ const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
+ UNUSED(x);
+ assert(!isnan(x));
+ assert(!isinf(x));
+ }
+#endif
+ }
+}
+
+static void ggml_compute_forward_silu_back(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * grad,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_silu_back_f32(params, src0, grad, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
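// [editor sketch, not part of the patch] The element-wise math that ggml_vec_silu_backward_f32
// is assumed to apply above: silu(x) = x*sigmoid(x), so with s = sigmoid(x) the derivative is
// s*(1 + x*(1 - s)), and the backward pass multiplies the incoming gradient by it.
#include <math.h>

static inline float silu_backward_ref(float x, float dy) {
    const float s = 1.0f/(1.0f + expf(-x)); // sigmoid(x)
    return dy*s*(1.0f + x*(1.0f - s));      // dy * d(silu)/dx
}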
+
// ggml_compute_forward_norm
-static void ggml_compute_forward_norm_f32(
+static void ggml_compute_forward_norm_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ GGML_ASSERT(src0->nb[0] == sizeof(float));
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int64_t ne00 = src0->ne[0];
+ const int64_t ne01 = src0->ne[1];
+ const int64_t ne02 = src0->ne[2];
+ const int64_t ne03 = src0->ne[3];
+
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ const float eps = 1e-5f; // TODO: make this a parameter
+
+ // TODO: optimize
+ for (int64_t i03 = 0; i03 < ne03; i03++) {
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
+ const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
+
+ ggml_float sum = 0.0;
+ for (int64_t i00 = 0; i00 < ne00; i00++) {
+ sum += (ggml_float)x[i00];
+ }
+
+ float mean = sum/ne00;
+
+ float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
+
+ ggml_float sum2 = 0.0;
+ for (int64_t i00 = 0; i00 < ne00; i00++) {
+ float v = x[i00] - mean;
+ y[i00] = v;
+ sum2 += (ggml_float)(v*v);
+ }
+
+ float variance = sum2/ne00;
+ const float scale = 1.0f/sqrtf(variance + eps);
+
+ ggml_vec_scale_f32(ne00, y, scale);
+ }
+ }
+ }
+}
+
+static void ggml_compute_forward_norm(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_norm_f32(params, src0, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
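// [editor sketch, not part of the patch] The per-row math of ggml_compute_forward_norm_f32
// above, written out for a single row of n floats without threading or strides: subtract the
// mean, then scale by 1/sqrt(variance + eps). eps mirrors the 1e-5f constant used above.
#include <math.h>

static void norm_row_ref(int n, float * y, const float * x, float eps) {
    double sum = 0.0;
    for (int i = 0; i < n; i++) sum += x[i];
    const float mean = (float)(sum/n);

    double sum2 = 0.0;
    for (int i = 0; i < n; i++) {
        const float v = x[i] - mean;
        y[i] = v;
        sum2 += (double)(v*v);
    }

    const float scale = 1.0f/sqrtf((float)(sum2/n) + eps);
    for (int i = 0; i < n; i++) y[i] *= scale;
}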
+
+static void ggml_compute_forward_rms_norm_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
struct ggml_tensor * dst) {
const size_t nb2 = dst->nb[2];
const size_t nb3 = dst->nb[3];
- const float eps = 1e-5f; // TODO: make this a parameter
+ const float eps = 1e-6f; // TODO: make this a parameter
// TODO: optimize
for (int64_t i03 = 0; i03 < ne03; i03++) {
ggml_float sum = 0.0;
for (int64_t i00 = 0; i00 < ne00; i00++) {
- sum += (ggml_float)x[i00];
+ sum += (ggml_float)(x[i00] * x[i00]);
}
float mean = sum/ne00;
float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
- ggml_float sum2 = 0.0;
- for (int64_t i00 = 0; i00 < ne00; i00++) {
- float v = x[i00] - mean;
- y[i00] = v;
- sum2 += (ggml_float)(v*v);
- }
+ memcpy(y, x, ne00 * sizeof(float));
+ // for (int i00 = 0; i00 < ne00; i00++) {
+ // y[i00] = x[i00];
+ // }
- float variance = sum2/ne00;
- const float scale = 1.0f/sqrtf(variance + eps);
+ const float scale = 1.0f/sqrtf(mean + eps);
ggml_vec_scale_f32(ne00, y, scale);
}
}
}
-static void ggml_compute_forward_norm(
+static void ggml_compute_forward_rms_norm(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
struct ggml_tensor * dst) {
switch (src0->type) {
case GGML_TYPE_F32:
{
- ggml_compute_forward_norm_f32(params, src0, dst);
+ ggml_compute_forward_rms_norm_f32(params, src0, dst);
} break;
default:
{
}
}
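// [editor sketch, not part of the patch] RMS norm on a single row, matching the forward pass
// above: no mean subtraction, each element is scaled by 1/sqrt(mean(x^2) + eps). eps mirrors
// the 1e-6f constant used above.
#include <math.h>

static void rms_norm_row_ref(int n, float * y, const float * x, float eps) {
    double sum_xx = 0.0;
    for (int i = 0; i < n; i++) sum_xx += (double)x[i]*x[i];

    const float scale = 1.0f/sqrtf((float)(sum_xx/n) + eps);
    for (int i = 0; i < n; i++) y[i] = x[i]*scale;
}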
-static void ggml_compute_forward_rms_norm_f32(
+
+static void ggml_compute_forward_rms_norm_back_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
- GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
const size_t nb02 = src0->nb[2];
const size_t nb03 = src0->nb[3];
+ const size_t nb11 = src1->nb[1];
+ const size_t nb12 = src1->nb[2];
+ const size_t nb13 = src1->nb[3];
+
const size_t nb1 = dst->nb[1];
const size_t nb2 = dst->nb[2];
const size_t nb3 = dst->nb[3];
for (int64_t i03 = 0; i03 < ne03; i03++) {
for (int64_t i02 = 0; i02 < ne02; i02++) {
for (int64_t i01 = ith; i01 < ne01; i01 += nth) {
+ // src1 is same shape as src0 => same indices
+ const int64_t i11 = i01;
+ const int64_t i12 = i02;
+ const int64_t i13 = i03;
+
const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
+ const float * dz = (float *) ((char *) src1->data + i11*nb11 + i12*nb12 + i13*nb13);
+
+ ggml_float sum_xx = 0.0;
+ ggml_float sum_xdz = 0.0;
- ggml_float sum = 0.0;
for (int64_t i00 = 0; i00 < ne00; i00++) {
- sum += (ggml_float)(x[i00] * x[i00]);
+ sum_xx += (ggml_float)(x[i00] * x[i00]);
+ sum_xdz += (ggml_float)(x[i00] * dz[i00]);
}
- float mean = sum/ne00;
-
- float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
-
- memcpy(y, x, ne00 * sizeof(float));
- // for (int i00 = 0; i00 < ne00; i00++) {
- // y[i00] = x[i00];
- // }
-
- const float scale = 1.0f/sqrtf(mean + eps);
+ //const float mean = (float)(sum_xx)/ne00;
+ const float mean_eps = (float)(sum_xx)/ne00 + eps;
+ const float sum_eps = (float)(sum_xx) + eps*ne00;
+ //const float mean_xdz = (float)(sum_xdz)/ne00;
+ // we could cache rms from forward pass to improve performance.
+ // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms.
+ //const float rms = sqrtf(mean_eps);
+ const float rrms = 1.0f / sqrtf(mean_eps);
+ //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3)
- ggml_vec_scale_f32(ne00, y, scale);
+ {
+ // z = rms_norm(x)
+ //
+ // rms_norm(src0) =
+ // scale(
+ // src0,
+ // div(
+ // 1,
+ // sqrt(
+ // add(
+ // scale(
+ // sum(
+ // sqr(
+ // src0)),
+ // (1.0/N)),
+ // eps))));
+
+ // postorder:
+ // ## op args grad
+ // 00 param src0 grad[#00]
+ // 01 const 1
+ // 02 sqr (#00) grad[#02]
+ // 03 sum (#02) grad[#03]
+ // 04 const 1/N
+ // 05 scale (#03, #04) grad[#05]
+ // 06 const eps
+ // 07 add (#05, #06) grad[#07]
+ // 08 sqrt (#07) grad[#08]
+ // 09 div (#01,#08) grad[#09]
+ // 10 scale (#00,#09) grad[#10]
+ //
+ // backward pass, given grad[#10]
+ // #10: scale
+ // grad[#00] += scale(grad[#10],#09)
+ // grad[#09] += sum(mul(grad[#10],#00))
+ // #09: div
+ // grad[#08] += neg(mul(grad[#09], div(#09,#08)))
+ // #08: sqrt
+ // grad[#07] += mul(grad[#08], div(0.5, #08))
+ // #07: add
+ // grad[#05] += grad[#07]
+ // #05: scale
+ // grad[#03] += scale(grad[#05],#04)
+ // #03: sum
+ // grad[#02] += repeat(grad[#03], #02)
+ // #02:
+ // grad[#00] += scale(mul(#00, grad[#02]), 2.0)
+ //
+ // substitute and simplify:
+ // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
+ // grad[#02] = repeat(grad[#03], #02)
+ // grad[#02] = repeat(scale(grad[#05],#04), #02)
+ // grad[#02] = repeat(scale(grad[#07],#04), #02)
+ // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02)
+ // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02)
+ // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02)
+ // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02)
+ // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02)
+ // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02)
+ // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)
+ // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0)
+ // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0)
+ // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0)
+ // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N)))
+ // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
+ // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N))
+ // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N))
+ // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps))
+ // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps)))
+ // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps))
+ // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps))
+ // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps))
+ // a = b*c + d*e
+ // a = b*c*f/f + d*e*f/f
+ // a = (b*c*f + d*e*f)*(1/f)
+ // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c))
+ // a = (b + d*e/c)*c
+ // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps)
+ // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms
+ // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms
+ // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms
+ // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms
+ // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms
+ // a = (dz + x*div(-mean_xdz,mean_eps))*rrms
+ // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms)
+ // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
+ // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
+ }
+ // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms)
+ // post-order:
+ // dx := x
+ // dx := scale(dx,-mean_xdz/mean_eps)
+ // dx := add(dx, dz)
+ // dx := scale(dx, rrms)
+ float * dx = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
+
+ ggml_vec_cpy_f32 (ne00, dx, x);
+ // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps);
+ ggml_vec_scale_f32(ne00, dx, (float)(-sum_xdz)/sum_eps);
+ ggml_vec_acc_f32 (ne00, dx, dz);
+ ggml_vec_scale_f32(ne00, dx, rrms);
}
}
}
}
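// [editor sketch, not part of the patch] The closed-form gradient the derivation above arrives
// at, applied to one row: with mean_eps = mean(x*x) + eps and mean_xdz = mean(x*dz),
// dx = (dz - x*mean_xdz/mean_eps) / sqrt(mean_eps), which is what the vec_cpy/scale/acc/scale
// sequence computes (using sum_xdz/sum_eps == mean_xdz/mean_eps).
#include <math.h>

static void rms_norm_back_row_ref(int n, float * dx, const float * x, const float * dz, float eps) {
    double sum_xx = 0.0, sum_xdz = 0.0;
    for (int i = 0; i < n; i++) {
        sum_xx  += (double)x[i]*x[i];
        sum_xdz += (double)x[i]*dz[i];
    }

    const float mean_eps = (float)(sum_xx/n) + eps;
    const float mean_xdz = (float)(sum_xdz/n);
    const float rrms     = 1.0f/sqrtf(mean_eps);

    for (int i = 0; i < n; i++) {
        dx[i] = (dz[i] - x[i]*mean_xdz/mean_eps)*rrms;
    }
}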
-static void ggml_compute_forward_rms_norm(
+static void ggml_compute_forward_rms_norm_back(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
switch (src0->type) {
case GGML_TYPE_F32:
{
- ggml_compute_forward_rms_norm_f32(params, src0, dst);
+ ggml_compute_forward_rms_norm_back_f32(params, src0, src1, dst);
} break;
default:
{
switch (src0->type) {
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
- case GGML_TYPE_Q4_2:
case GGML_TYPE_Q5_0:
case GGML_TYPE_Q5_1:
case GGML_TYPE_Q8_0:
const int ir0 = dr*ith;
const int ir1 = MIN(ir0 + dr, nr);
+ const size_t nb01 = src0->nb[1];
+
+ const size_t nb1 = dst->nb[1];
+
+
for (int i1 = ir0; i1 < ir1; i1++) {
- ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*(dst->nb[1])), v);
+ if (dst->data != src0->data) {
+ // src0 is same shape as dst => same indices
+ memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
+ }
+ ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
}
}
}
}
+// ggml_compute_forward_set
+
+static void ggml_compute_forward_set_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ const struct ggml_tensor * opt0,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
+
+ GGML_ASSERT(opt0->type == GGML_TYPE_I32);
+ GGML_ASSERT(ggml_nelements(opt0) == 5);
+
+ // view src0 and dst with these strides and data offset inbytes during set
+ // nb0 is implicitly element_size because src0 and dst are contiguous
+ size_t nb1 = ((int32_t *) opt0->data)[0];
+ size_t nb2 = ((int32_t *) opt0->data)[1];
+ size_t nb3 = ((int32_t *) opt0->data)[2];
+ size_t offset = ((int32_t *) opt0->data)[3];
+ bool inplace = (bool) ((int32_t *) opt0->data)[4];
+
+ if (!inplace && (params->type == GGML_TASK_INIT)) {
+ // memcpy needs to be synchronized across threads to avoid race conditions.
+ // => do it in INIT phase
+ memcpy(
+ ((char *) dst->data),
+ ((char *) src0->data),
+ ggml_nbytes(dst));
+ }
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(src1);
+ const int nc = src1->ne[0];
+
+ const int64_t ne10 = src1->ne[0];
+ const int64_t ne11 = src1->ne[1];
+ const int64_t ne12 = src1->ne[2];
+ const int64_t ne13 = src1->ne[3];
+
+ const size_t nb10 = src1->nb[0];
+ const size_t nb11 = src1->nb[1];
+ const size_t nb12 = src1->nb[2];
+ const size_t nb13 = src1->nb[3];
+
+ // src0 and dst as viewed during set
+ const size_t nb0 = ggml_element_size(src0);
+
+ const int im0 = (ne10 == 0 ? 0 : ne10-1);
+ const int im1 = (ne11 == 0 ? 0 : ne11-1);
+ const int im2 = (ne12 == 0 ? 0 : ne12-1);
+ const int im3 = (ne13 == 0 ? 0 : ne13-1);
+
+ GGML_ASSERT(offset + im0*nb0 + im1*nb1 + im2*nb2 + im3*nb3 < ggml_nbytes(dst));
+
+ GGML_ASSERT(nb10 == sizeof(float));
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 and dst are viewed with shape of src1 and offset
+ // => same indices
+ const int i3 = ir/(ne12*ne11);
+ const int i2 = (ir - i3*ne12*ne11)/ne11;
+ const int i1 = (ir - i3*ne12*ne11 - i2*ne11);
+
+ ggml_vec_cpy_f32(nc,
+ (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + offset),
+ (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11));
+ }
+}
+
+static void ggml_compute_forward_set(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ const struct ggml_tensor * opt0,
+ struct ggml_tensor * dst) {
+
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_set_f32(params, src0, src1, opt0, dst);
+ } break;
+ case GGML_TYPE_F16:
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q8_1:
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
// ggml_compute_forward_cpy
static void ggml_compute_forward_cpy(
switch (src0->type) {
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
- case GGML_TYPE_Q4_2:
case GGML_TYPE_Q5_0:
case GGML_TYPE_Q5_1:
case GGML_TYPE_Q8_0:
//}
}
-// ggml_compute_forward_diag_mask_inf
+// ggml_compute_forward_get_rows_back
+
+static void ggml_compute_forward_get_rows_back_f32_f16(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ const struct ggml_tensor * opt0,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(params->ith == 0);
+ GGML_ASSERT(ggml_are_same_shape(opt0, dst));
+ GGML_ASSERT(ggml_is_contiguous(opt0));
+ GGML_ASSERT(ggml_is_contiguous(dst));
+
+ ggml_compute_forward_dup_same_cont(params, opt0, dst);
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int nc = src0->ne[0];
+ const int nr = ggml_nelements(src1);
+
+ GGML_ASSERT( dst->ne[0] == nc);
+ GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t));
+
+ for (int i = 0; i < nr; ++i) {
+ const int r = ((int32_t *) src1->data)[i];
+
+ for (int j = 0; j < nc; ++j) {
+ ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i*src0->nb[1]))[j];
+ ((float *) ((char *) dst->data + r*dst->nb[1]))[j] += GGML_FP16_TO_FP32(v);
+ }
+ }
+}
+
+static void ggml_compute_forward_get_rows_back_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ const struct ggml_tensor * opt0,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(params->ith == 0);
+ GGML_ASSERT(ggml_are_same_shape(opt0, dst));
+ GGML_ASSERT(ggml_is_contiguous(opt0));
+ GGML_ASSERT(ggml_is_contiguous(dst));
+
+ ggml_compute_forward_dup_same_cont(params, opt0, dst);
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int nc = src0->ne[0];
+ const int nr = ggml_nelements(src1);
+
+ GGML_ASSERT( dst->ne[0] == nc);
+ GGML_ASSERT(src0->nb[0] == sizeof(float));
+
+ for (int i = 0; i < nr; ++i) {
+ const int r = ((int32_t *) src1->data)[i];
+
+ ggml_vec_add_f32(nc,
+ (float *) ((char *) dst->data + r*dst->nb[1]),
+ (float *) ((char *) dst->data + r*dst->nb[1]),
+ (float *) ((char *) src0->data + i*src0->nb[1]));
+ }
+}
+
-static void ggml_compute_forward_diag_mask_inf_f32(
+static void ggml_compute_forward_get_rows_back(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
+ const struct ggml_tensor * opt0,
struct ggml_tensor * dst) {
- assert(params->ith == 0);
+ switch (src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, opt0, dst);
+ } break;
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_get_rows_back_f32(params, src0, src1, opt0, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+
+ //static bool first = true;
+ //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
+ //if (first) {
+ // first = false;
+ //} else {
+ // for (int k = 0; k < dst->ne[1]; ++k) {
+ // for (int j = 0; j < dst->ne[0]/16; ++j) {
+ // for (int i = 0; i < 16; ++i) {
+ // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
+ // }
+ // printf("\n");
+ // }
+ // printf("\n");
+ // }
+ // printf("\n");
+ // exit(0);
+ //}
+}
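// [editor sketch, not part of the patch] get_rows_back is a scatter-add: after dst has been
// initialized from opt0 by the dup above, row src1[i] of dst accumulates row i of src0 (the
// incoming gradient of get_rows). Dense layout, f32 only, for illustration.
#include <stdint.h>

static void get_rows_back_ref(int nc, int nr, float * dst, const float * src0, const int32_t * rows) {
    for (int i = 0; i < nr; i++) {
        const int r = rows[i];
        for (int j = 0; j < nc; j++) {
            dst[r*nc + j] += src0[i*nc + j];
        }
    }
}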
+
+// ggml_compute_forward_diag
+
+static void ggml_compute_forward_diag_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(params->ith == 0);
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // TODO: handle transposed/permuted matrices
+
+ const int ne00 = src0->ne[0];
+ const int ne01 = src0->ne[1];
+ const int ne02 = src0->ne[2];
+ const int ne03 = src0->ne[3];
+ const int ne0 = dst->ne[0];
+ const int ne1 = dst->ne[1];
+ const int ne2 = dst->ne[2];
+ const int ne3 = dst->ne[3];
+ GGML_ASSERT(ne00 == ne0);
+ GGML_ASSERT(ne00 == ne1);
+ GGML_ASSERT(ne01 == 1);
+ GGML_ASSERT(ne02 == ne2);
+ GGML_ASSERT(ne03 == ne3);
+
+ const int nb00 = src0->nb[0];
+ //const int nb01 = src0->nb[1];
+ const int nb02 = src0->nb[2];
+ const int nb03 = src0->nb[3];
+ const int nb0 = dst->nb[0];
+ const int nb1 = dst->nb[1];
+ const int nb2 = dst->nb[2];
+ const int nb3 = dst->nb[3];
+
+ GGML_ASSERT(nb00 == sizeof(float));
+ GGML_ASSERT(nb0 == sizeof(float));
+
+ for (int i3 = 0; i3 < ne3; i3++) {
+ for (int i2 = 0; i2 < ne2; i2++) {
+ for (int i1 = 0; i1 < ne1; i1++) {
+ float * d = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
+ float * s = (float *)((char *) src0->data + i3*nb03 + i2*nb02);
+ for (int i0 = 0; i0 < i1; i0++) {
+ d[i0] = 0;
+ }
+ d[i1] = s[i1];
+ for (int i0 = i1+1; i0 < ne0; i0++) {
+ d[i0] = 0;
+ }
+ }
+ }
+ }
+}
+
+static void ggml_compute_forward_diag(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_diag_f32(params, src0, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
+// ggml_compute_forward_diag_mask_inf
+
+static void ggml_compute_forward_diag_mask_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst,
+ const float value) {
assert(src1->type == GGML_TYPE_I32);
- assert(ggml_nelements(src1) == 1);
+ assert(ggml_nelements(src1) == 2);
+
+ const int n_past = ((int32_t *) src1->data)[0];
+ const bool inplace = (bool)((int32_t *) src1->data)[1];
+
+ if (params->type == GGML_TASK_INIT) {
+ // TODO: this hack is not good, need a better way to handle this
+ if (!inplace) {
+ // use the init task to copy src -> dst
+ struct ggml_compute_params params_cpy = *params;
+
+ params_cpy.ith = 0;
+ params_cpy.nth = 1;
+ params_cpy.type = GGML_TASK_COMPUTE;
+
+ ggml_compute_forward_dup_same_cont(&params_cpy, src0, dst);
+ }
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
}
- const int n_past = ((int32_t *) src1->data)[0];
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ assert(n_past >= 0);
// TODO: handle transposed/permuted matrices
assert(src0->nb[0] == sizeof(float));
for (int k = 0; k < nz; k++) {
- for (int j = 0; j < nr; j++) {
+ for (int j = ith; j < nr; j += nth) {
for (int i = n_past; i < nc; i++) {
if (i > n_past + j) {
- *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = -INFINITY;
+ *(float *)((char *) dst->data + k*dst->nb[2] + j*dst->nb[1] + i*dst->nb[0]) = value;
}
}
}
switch (src0->type) {
case GGML_TYPE_F32:
{
- ggml_compute_forward_diag_mask_inf_f32(params, src0, src1, dst);
+ ggml_compute_forward_diag_mask_f32(params, src0, src1, dst, -INFINITY);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
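// [editor sketch, not part of the patch] The masking rule shared by diag_mask_inf and
// diag_mask_zero above, on a single [nr x nc] matrix: entries to the right of the causal
// diagonal (i > n_past + j) are overwritten with `value` (-INFINITY or 0 respectively).
static void diag_mask_ref(int nr, int nc, int n_past, float value, float * m) {
    for (int j = 0; j < nr; j++) {
        for (int i = n_past + j + 1; i < nc; i++) {
            m[j*nc + i] = value;
        }
    }
}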
+
+static void ggml_compute_forward_diag_mask_zero(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_diag_mask_f32(params, src0, src1, dst, 0);
} break;
default:
{
const int ir1 = MIN(ir0 + dr, nr);
for (int i1 = ir0; i1 < ir1; i1++) {
- float *p = (float *)((char *) dst->data + i1*dst->nb[1]);
+ float *sp = (float *)((char *) src0->data + i1*src0->nb[1]);
+ float *dp = (float *)((char *) dst->data + i1*dst->nb[1]);
#ifndef NDEBUG
for (int i = 0; i < nc; ++i) {
//printf("p[%d] = %f\n", i, p[i]);
- assert(!isnan(p[i]));
+ assert(!isnan(sp[i]));
}
#endif
float max = -INFINITY;
- ggml_vec_max_f32(nc, &max, p);
+ ggml_vec_max_f32(nc, &max, sp);
ggml_float sum = 0.0;
uint16_t scvt;
for (int i = 0; i < nc; i++) {
- //printf("p[%3d] = %8.4f\n", i, p[i]);
- if (p[i] == -INFINITY) {
- p[i] = 0.0f;
+ if (sp[i] == -INFINITY) {
+ dp[i] = 0.0f;
} else {
- //const float val = (p[i] == -INFINITY) ? 0.0 : exp(p[i] - max);
- ggml_fp16_t s = GGML_FP32_TO_FP16(p[i] - max);
+ // const float val = (sp[i] == -INFINITY) ? 0.0 : exp(sp[i] - max);
+ ggml_fp16_t s = GGML_FP32_TO_FP16(sp[i] - max);
memcpy(&scvt, &s, sizeof(scvt));
const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
sum += (ggml_float)val;
- p[i] = val;
+ dp[i] = val;
}
}
assert(sum > 0.0);
sum = 1.0/sum;
- ggml_vec_scale_f32(nc, p, sum);
+ ggml_vec_scale_f32(nc, dp, sum);
#ifndef NDEBUG
for (int i = 0; i < nc; ++i) {
- assert(!isnan(p[i]));
- assert(!isinf(p[i]));
+ assert(!isnan(dp[i]));
+ assert(!isinf(dp[i]));
}
#endif
}
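// [editor sketch, not part of the patch] Numerically stable softmax over one row, the same
// max-subtraction scheme as above but calling expf directly instead of the table_exp_f16
// fp16 lookup table.
#include <math.h>

static void soft_max_row_ref(int n, float * dp, const float * sp) {
    float max = -INFINITY;
    for (int i = 0; i < n; i++) if (sp[i] > max) max = sp[i];

    double sum = 0.0;
    for (int i = 0; i < n; i++) {
        dp[i] = (sp[i] == -INFINITY) ? 0.0f : expf(sp[i] - max);
        sum  += dp[i];
    }

    const float inv_sum = (float)(1.0/sum);
    for (int i = 0; i < n; i++) dp[i] *= inv_sum;
}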
const int n_past = ((int32_t *) src1->data)[0];
const int n_head = ((int32_t *) src1->data)[1];
+ assert(n_past >= 0);
+
const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
const int ne1 = src0->ne[1]; // seq_len_without_past
//const int ne2 = src0->ne[2]; // n_head -> this is k
m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
}
- pdst[0] = (j+1) * m_k + src[0];
+ pdst[0] = i * m_k + src[0];
}
}
}
const int n_past = ((int32_t *) src1->data)[0];
const int n_head = ((int32_t *) src1->data)[1];
+ assert(n_past >= 0);
+
const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
const int ne1 = src0->ne[1]; // seq_len_without_past
//const int ne2 = src0->ne[2]; // n_head -> this is k
}
// we return F32
- pdst[0] = (j+1) * m_k + GGML_FP16_TO_FP32(src[0]);
+ pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]);
}
}
}
} break;
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
- case GGML_TYPE_Q4_2:
case GGML_TYPE_Q5_0:
case GGML_TYPE_Q5_1:
case GGML_TYPE_Q8_0:
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
- assert(src1->type == GGML_TYPE_I32);
- assert(ggml_nelements(src1) == 3);
+ GGML_ASSERT(src1->type == GGML_TYPE_I32);
+ GGML_ASSERT(ggml_nelements(src1) == 3);
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
const int n_dims = ((int32_t *) src1->data)[1];
const int mode = ((int32_t *) src1->data)[2];
- //const int64_t ne0 = src0->ne[0];
- const int64_t ne1 = src0->ne[1];
- const int64_t ne2 = src0->ne[2];
- const int64_t ne3 = src0->ne[3];
+ assert(n_past >= 0);
- const int nb0 = src0->nb[0];
- const int nb1 = src0->nb[1];
- const int nb2 = src0->nb[2];
- const int nb3 = src0->nb[3];
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const int64_t ne0 = dst->ne[0];
+ const int64_t ne1 = dst->ne[1];
+ const int64_t ne2 = dst->ne[2];
+ const int64_t ne3 = dst->ne[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
//printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
//printf("n_past = %d, ne2 = %d\n", n_past, ne2);
- assert(nb0 == sizeof(float));
+ GGML_ASSERT(nb00 == sizeof(float));
const int ith = params->ith;
const int nth = params->nth;
- const int nr = ggml_nrows(src0);
+ const int nr = ggml_nrows(dst);
+
+ GGML_ASSERT(n_dims <= ne0);
+ GGML_ASSERT(n_dims % 2 == 0);
// rows per thread
const int dr = (nr + nth - 1)/nth;
for (int64_t i3 = 0; i3 < ne3; i3++) {
for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
- const int p = ((mode & 1) == 0 ? n_past + i2 : i2);
+ const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
for (int64_t i1 = 0; i1 < ne1; i1++) {
if (ir++ < ir0) continue;
if (ir > ir1) break;
float theta = (float)p;
- for (int i0 = 0; i0 < n_dims; i0 += 2) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ if (!is_neox) {
+ for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
+ const float cos_theta = cosf(theta);
+ const float sin_theta = sinf(theta);
- theta *= theta_scale;
+ theta *= theta_scale;
- if (!is_neox) {
- const float * const src = (float *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
- float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+ const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
const float x0 = src[0];
const float x1 = src[1];
dst_data[0] = x0*cos_theta - x1*sin_theta;
dst_data[1] = x0*sin_theta + x1*cos_theta;
- } else {
- const float * const src = (float *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + (i0/2)*nb0);
- float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + (i0/2)*nb0);
+ }
+ } else {
+ // TODO: this is probably wrong, but I can't figure it out ..
+ // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
+ for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
+ for (int64_t ic = 0; ic < n_dims; ic += 2) {
+ const float cos_theta = cosf(theta);
+ const float sin_theta = sinf(theta);
- const float x0 = src[0];
- const float x1 = src[n_dims/2];
+ theta *= theta_scale;
- dst_data[0] = x0*cos_theta - x1*sin_theta;
- dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
+ const int64_t i0 = ib*n_dims + ic/2;
+
+ const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+ const float x0 = src[0];
+ const float x1 = src[n_dims/2];
+
+ dst_data[0] = x0*cos_theta - x1*sin_theta;
+ dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
+ }
}
}
}
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
+ GGML_ASSERT(src1->type == GGML_TYPE_I32);
+ GGML_ASSERT(ggml_nelements(src1) == 3);
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int n_past = ((int32_t *) src1->data)[0];
+ const int n_dims = ((int32_t *) src1->data)[1];
+ const int mode = ((int32_t *) src1->data)[2];
+
+ assert(n_past >= 0);
+
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const int64_t ne0 = dst->ne[0];
+ const int64_t ne1 = dst->ne[1];
+ const int64_t ne2 = dst->ne[2];
+ const int64_t ne3 = dst->ne[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
+ //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
+
+ GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(dst);
+
+ GGML_ASSERT(n_dims <= ne0);
+ GGML_ASSERT(n_dims % 2 == 0);
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ // row index used to determine which thread to use
+ int ir = 0;
+
+ const float theta_scale = powf(10000.0, -2.0f/n_dims);
+
+ const bool is_neox = mode & 2;
+
+ for (int64_t i3 = 0; i3 < ne3; i3++) {
+ for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
+ const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
+ for (int64_t i1 = 0; i1 < ne1; i1++) {
+ if (ir++ < ir0) continue;
+ if (ir > ir1) break;
+
+ float theta = (float)p;
+
+ if (!is_neox) {
+ for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
+ const float cos_theta = cosf(theta);
+ const float sin_theta = sinf(theta);
+
+ theta *= theta_scale;
+
+ const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+ const float x0 = GGML_FP16_TO_FP32(src[0]);
+ const float x1 = GGML_FP16_TO_FP32(src[1]);
+
+ dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
+ dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
+ }
+ } else {
+ // TODO: this is probably wrong, but I can't figure it out ..
+ // ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
+ for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
+ for (int64_t ic = 0; ic < n_dims; ic += 2) {
+ const float cos_theta = cosf(theta);
+ const float sin_theta = sinf(theta);
+
+ theta *= theta_scale;
+
+ const int64_t i0 = ib*n_dims + ic/2;
+
+ const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+ const float x0 = GGML_FP16_TO_FP32(src[0]);
+ const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
+
+ dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
+ dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void ggml_compute_forward_rope(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_rope_f16(params, src0, src1, dst);
+ } break;
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_rope_f32(params, src0, src1, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
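// [editor sketch, not part of the patch] The pairwise rotation the non-neox RoPE path above
// applies, reduced to one row of n_dims floats at position p: pair (x[2i], x[2i+1]) is rotated
// by theta = p * 10000^(-2i/n_dims).
#include <math.h>

static void rope_row_ref(int n_dims, int p, float * x) {
    const float theta_scale = powf(10000.0f, -2.0f/n_dims);
    float theta = (float)p;

    for (int i0 = 0; i0 < n_dims; i0 += 2) {
        const float cos_theta = cosf(theta);
        const float sin_theta = sinf(theta);
        theta *= theta_scale;

        const float x0 = x[i0 + 0];
        const float x1 = x[i0 + 1];
        x[i0 + 0] = x0*cos_theta - x1*sin_theta;
        x[i0 + 1] = x0*sin_theta + x1*cos_theta;
    }
}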
+
+// ggml_compute_forward_rope_back
+
+static void ggml_compute_forward_rope_back_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ assert(src1->type == GGML_TYPE_I32);
+ assert(ggml_nelements(src1) == 3);
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // y = rope(x, src1)
+ // dx = rope_back(dy, src1)
+ // src0 is dy, src1 contains options
+
+ const int n_past = ((int32_t *) src1->data)[0];
+ const int n_dims = ((int32_t *) src1->data)[1];
+ const int mode = ((int32_t *) src1->data)[2];
+
+ assert(n_past >= 0);
+
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const int64_t ne0 = dst->ne[0];
+ const int64_t ne1 = dst->ne[1];
+ const int64_t ne2 = dst->ne[2];
+ const int64_t ne3 = dst->ne[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+
+ //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
+ //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
+
+ assert(nb0 == sizeof(float));
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(dst);
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ // row index used to determine which thread to use
+ int ir = 0;
+
+ const float theta_scale = powf(10000.0, -2.0f/n_dims);
+
+ const bool is_neox = mode & 2;
+
+ for (int64_t i3 = 0; i3 < ne3; i3++) {
+ for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
+ const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
+ for (int64_t i1 = 0; i1 < ne1; i1++) {
+ if (ir++ < ir0) continue;
+ if (ir > ir1) break;
+
+ float theta = (float)p;
+
+ if (!is_neox) {
+ for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
+ const float cos_theta = cosf(theta);
+ const float sin_theta = sinf(theta);
+
+ theta *= theta_scale;
+
+ const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ float * dx = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+ const float dy0 = dy[0];
+ const float dy1 = dy[1];
+
+ dx[0] = dy0*cos_theta + dy1*sin_theta;
+ dx[1] = - dy0*sin_theta + dy1*cos_theta;
+ }
+ } else {
+ for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
+ for (int64_t ic = 0; ic < n_dims; ic += 2) {
+ const float cos_theta = cosf(theta);
+ const float sin_theta = sinf(theta);
+
+ theta *= theta_scale;
+
+ const int64_t i0 = ib*n_dims + ic/2;
+
+ const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ float * dx = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+ const float dy0 = dy[0];
+ const float dy1 = dy[n_dims/2];
+
+ dx[0] = dy0*cos_theta + dy1*sin_theta;
+ dx[n_dims/2] = - dy0*sin_theta + dy1*cos_theta;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static void ggml_compute_forward_rope_back_f16(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
assert(src1->type == GGML_TYPE_I32);
assert(ggml_nelements(src1) == 3);
return;
}
- const int n_past = ((int32_t *) src1->data)[0];
- const int n_dims = ((int32_t *) src1->data)[1];
- const int mode = ((int32_t *) src1->data)[2];
+ // y = rope(x, src1)
+ // dx = rope_back(dy, src1)
+ // src0 is dy, src1 contains options
+
+ const int n_past = ((int32_t *) src1->data)[0];
+ const int n_dims = ((int32_t *) src1->data)[1];
+ const int mode = ((int32_t *) src1->data)[2];
+
+ assert(n_past >= 0);
+
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const int64_t ne0 = dst->ne[0];
+ const int64_t ne1 = dst->ne[1];
+ const int64_t ne2 = dst->ne[2];
+ const int64_t ne3 = dst->ne[3];
- //const int64_t ne0 = src0->ne[0];
- const int64_t ne1 = src0->ne[1];
- const int64_t ne2 = src0->ne[2];
- const int64_t ne3 = src0->ne[3];
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
- const int nb0 = src0->nb[0];
- const int nb1 = src0->nb[1];
- const int nb2 = src0->nb[2];
- const int nb3 = src0->nb[3];
//printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
//printf("n_past = %d, ne2 = %d\n", n_past, ne2);
const int ith = params->ith;
const int nth = params->nth;
- const int nr = ggml_nrows(src0);
+ const int nr = ggml_nrows(dst);
// rows per thread
const int dr = (nr + nth - 1)/nth;
for (int64_t i3 = 0; i3 < ne3; i3++) {
for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
- const int p = ((mode & 1) == 0 ? n_past + i2 : i2);
+ const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
for (int64_t i1 = 0; i1 < ne1; i1++) {
if (ir++ < ir0) continue;
if (ir > ir1) break;
float theta = (float)p;
- for (int i0 = 0; i0 < n_dims; i0 += 2) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ if (!is_neox) {
+ for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
+ const float cos_theta = cosf(theta);
+ const float sin_theta = sinf(theta);
- theta *= theta_scale;
+ theta *= theta_scale;
- if (!is_neox) {
- const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
- ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+ const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
- const float x0 = GGML_FP16_TO_FP32(src[0]);
- const float x1 = GGML_FP16_TO_FP32(src[1]);
+ const float dy0 = GGML_FP16_TO_FP32(dy[0]);
+ const float dy1 = GGML_FP16_TO_FP32(dy[1]);
- dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
- dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
- } else {
- const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + (i0/2)*nb0);
- ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + (i0/2)*nb0);
+ dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
+ dx[1] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
+ }
+ } else {
+ for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
+ for (int64_t ic = 0; ic < n_dims; ic += 2) {
+ const float cos_theta = cosf(theta);
+ const float sin_theta = sinf(theta);
- const float x0 = GGML_FP16_TO_FP32(src[0]);
- const float x1 = GGML_FP16_TO_FP32(src[n_dims/2]);
+ theta *= theta_scale;
+
+ const int64_t i0 = ib*n_dims + ic/2;
- dst_data[0] = GGML_FP32_TO_FP16(x0*cos_theta - x1*sin_theta);
- dst_data[n_dims/2] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
+ const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+ const float dy0 = GGML_FP16_TO_FP32(dy[0]);
+ const float dy1 = GGML_FP16_TO_FP32(dy[n_dims/2]);
+
+ dx[0] = GGML_FP32_TO_FP16( dy0*cos_theta + dy1*sin_theta);
+ dx[n_dims/2] = GGML_FP32_TO_FP16(-dy0*sin_theta + dy1*cos_theta);
+ }
}
}
}
}
}
-static void ggml_compute_forward_rope(
+static void ggml_compute_forward_rope_back(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
switch (src0->type) {
case GGML_TYPE_F16:
{
- ggml_compute_forward_rope_f16(params, src0, src1, dst);
+ ggml_compute_forward_rope_back_f16(params, src0, src1, dst);
} break;
case GGML_TYPE_F32:
{
- ggml_compute_forward_rope_f32(params, src0, src1, dst);
+ ggml_compute_forward_rope_back_f32(params, src0, src1, dst);
} break;
default:
{
{
ggml_compute_forward_add(params, tensor->src0, tensor->src1, tensor);
} break;
+ case GGML_OP_ADD1:
+ {
+ ggml_compute_forward_add1(params, tensor->src0, tensor->src1, tensor);
+ } break;
+ case GGML_OP_ACC:
+ {
+ ggml_compute_forward_acc(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+ } break;
case GGML_OP_SUB:
{
ggml_compute_forward_sub(params, tensor->src0, tensor->src1, tensor);
{
ggml_compute_forward_sqrt(params, tensor->src0, tensor);
} break;
+ case GGML_OP_LOG:
+ {
+ ggml_compute_forward_log(params, tensor->src0, tensor);
+ } break;
case GGML_OP_SUM:
{
ggml_compute_forward_sum(params, tensor->src0, tensor);
} break;
+ case GGML_OP_SUM_ROWS:
+ {
+ ggml_compute_forward_sum_rows(params, tensor->src0, tensor);
+ } break;
case GGML_OP_MEAN:
{
ggml_compute_forward_mean(params, tensor->src0, tensor);
{
ggml_compute_forward_silu(params, tensor->src0, tensor);
} break;
+ case GGML_OP_SILU_BACK:
+ {
+ ggml_compute_forward_silu_back(params, tensor->src0, tensor->src1, tensor);
+ } break;
case GGML_OP_NORM:
{
ggml_compute_forward_norm(params, tensor->src0, tensor);
{
ggml_compute_forward_rms_norm(params, tensor->src0, tensor);
} break;
+ case GGML_OP_RMS_NORM_BACK:
+ {
+ ggml_compute_forward_rms_norm_back(params, tensor->src0, tensor->src1, tensor);
+ } break;
case GGML_OP_MUL_MAT:
{
ggml_compute_forward_mul_mat(params, tensor->src0, tensor->src1, tensor);
{
ggml_compute_forward_scale(params, tensor->src0, tensor->src1, tensor);
} break;
+ case GGML_OP_SET:
+ {
+ ggml_compute_forward_set(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+ } break;
case GGML_OP_CPY:
{
ggml_compute_forward_cpy(params, tensor->src0, tensor);
{
ggml_compute_forward_get_rows(params, tensor->src0, tensor->src1, tensor);
} break;
+ case GGML_OP_GET_ROWS_BACK:
+ {
+ ggml_compute_forward_get_rows_back(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+ } break;
+ case GGML_OP_DIAG:
+ {
+ ggml_compute_forward_diag(params, tensor->src0, tensor);
+ } break;
case GGML_OP_DIAG_MASK_INF:
{
ggml_compute_forward_diag_mask_inf(params, tensor->src0, tensor->src1, tensor);
} break;
+ case GGML_OP_DIAG_MASK_ZERO:
+ {
+ ggml_compute_forward_diag_mask_zero(params, tensor->src0, tensor->src1, tensor);
+ } break;
case GGML_OP_SOFT_MAX:
{
ggml_compute_forward_soft_max(params, tensor->src0, tensor);
{
ggml_compute_forward_rope(params, tensor->src0, tensor->src1, tensor);
} break;
+ case GGML_OP_ROPE_BACK:
+ {
+ ggml_compute_forward_rope_back(params, tensor->src0, tensor->src1, tensor);
+ } break;
case GGML_OP_ALIBI:
{
ggml_compute_forward_alibi(params, tensor->src0, tensor->src1, tensor);
src1->grad = ggml_add_impl(ctx, src1->grad, tensor->grad, inplace);
}
} break;
+ case GGML_OP_ADD1:
+ {
+ if (src0->grad) {
+ src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
+ }
+ if (src1->grad) {
+ src1->grad = ggml_add_impl(ctx,
+ src1->grad,
+ ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
+ inplace);
+ }
+ } break;
+ case GGML_OP_ACC:
+ {
+ if (src0->grad) {
+ src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
+ }
+ if (src1->grad) {
+ GGML_ASSERT(ggml_nelements(tensor->opt[0]) == 5);
+ GGML_ASSERT(tensor->opt[0]->type == GGML_TYPE_I32);
+ const size_t nb1 = (( int32_t * ) tensor->opt[0]->data)[0];
+ const size_t nb2 = (( int32_t * ) tensor->opt[0]->data)[1];
+ const size_t nb3 = (( int32_t * ) tensor->opt[0]->data)[2];
+ const size_t offset = (( int32_t * ) tensor->opt[0]->data)[3];
+
+ struct ggml_tensor * tensor_grad_view = ggml_view_4d(ctx,
+ tensor->grad,
+ src1->grad->ne[0],
+ src1->grad->ne[1],
+ src1->grad->ne[2],
+ src1->grad->ne[3],
+ nb1, nb2, nb3, offset);
+
+ src1->grad =
+ ggml_add_impl(ctx,
+ src1->grad,
+ ggml_reshape(ctx,
+ ggml_cont(ctx, tensor_grad_view),
+ src1->grad),
+ inplace);
+ }
+ } break;
case GGML_OP_SUB:
{
if (src0->grad) {
src0->grad =
ggml_add_impl(ctx,
src0->grad,
- ggml_mul(ctx,
+ ggml_scale(ctx,
ggml_mul(ctx, src0, tensor->grad),
- ggml_repeat(ctx, ggml_new_f32(ctx, 2.0f), src0)),
+ ggml_new_f32(ctx, 2.0f)),
inplace);
}
} break;
case GGML_OP_SQRT:
+ {
+ if (src0->grad) {
+ src0->grad =
+ ggml_add_impl(ctx,
+ src0->grad,
+ ggml_mul(ctx,
+ tensor->grad, // this was not caught by test_grad because in test_grad tensor->grad is 1
+ ggml_div(ctx,
+ ggml_repeat(ctx, ggml_new_f32(ctx, 0.5f), tensor),
+ tensor)),
+ inplace);
+ }
+ } break;
+ case GGML_OP_LOG:
{
if (src0->grad) {
src0->grad =
ggml_add_impl(ctx,
src0->grad,
ggml_div(ctx,
- ggml_repeat(ctx, ggml_new_f32(ctx, 0.5f), tensor),
- tensor),
+ tensor->grad,
+ src0),
inplace);
}
} break;
case GGML_OP_SUM:
+ {
+ if (src0->grad) {
+ src0->grad =
+ ggml_add1_impl(ctx,
+ src0->grad,
+ tensor->grad,
+ inplace);
+ }
+ } break;
+ case GGML_OP_SUM_ROWS:
{
if (src0->grad) {
src0->grad =
ggml_add_impl(ctx,
src0->grad,
- ggml_repeat(ctx, tensor->grad, src0->grad),
+ ggml_repeat(ctx,
+ tensor->grad,
+ src0->grad),
inplace);
}
} break;
} break;
case GGML_OP_REPEAT:
{
+ // necessary for llama
if (src0->grad) {
+ GGML_ASSERT(src0->n_dims == 1 || src0->n_dims == 2);
+ const int nc = tensor->ne[0];
+ const int nr = tensor->ne[1];
+ const int nc0 = src0->ne[0];
+ const int nr0 = src0->ne[1];
+ const int ncr = nc/nc0; // guaranteed to be an integer due to the check in ggml_can_repeat
+ const int nrr = nr/nr0; // guaranteed to be an integer due to the check in ggml_can_repeat
+ // tensor->grad [nc,nr,1,1]
+ // reshape [nc0,nc/nc0,nr0,nr/nr0]
+ // permute [nc0,nr0,nc/nc0,nr/nr0]
+ // substitute [nc0,nr0,ncr,nrr]
+ // reshape [nc0*nr0,ncr*nrr,1,1]
+ // transpose [ncr*nrr,nc0*nr0,1,1]
+ // sum rows [1,nc0*nr0,1,1]
+ // transpose [nc0*nr0,1,1]
+ // reshape [nc0,nr0,1,1] reshape_1d or reshape_2d
+ // add to src0->grad
+
+ int64_t ne[4] = {nc0,ncr,nr0,nrr};
+
+ struct ggml_tensor* F00 = tensor->grad;
+ struct ggml_tensor* F01 = ggml_reshape (ctx, F00, ggml_new_tensor(ctx,tensor->grad->type,4,ne));
+ struct ggml_tensor* F02 = ggml_permute (ctx, F01, 0,2,1,3);
+ struct ggml_tensor* F03 = ggml_cont (ctx, F02);
+ struct ggml_tensor* F04 = ggml_reshape_2d(ctx, F03, nc0*nr0, ncr*nrr);
+ struct ggml_tensor* F05 = ggml_transpose (ctx, F04);
+ struct ggml_tensor* F06 = ggml_cont (ctx, F05);
+ struct ggml_tensor* F07 = ggml_sum_rows (ctx, F06);
+ struct ggml_tensor* F08 = ggml_transpose (ctx, F07);
+ struct ggml_tensor* F09 = ggml_cont (ctx, F08);
+ struct ggml_tensor* F10 = ggml_reshape (ctx, F09, src0->grad);
+
src0->grad =
ggml_add_impl(ctx,
src0->grad,
- ggml_sum(ctx, tensor->grad),
+ F10,
inplace);
}
} break;
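// [editor sketch, not part of the patch] What the reshape/permute/sum_rows chain above is
// intended to compute for the 2-D case: the gradient of repeat sums every repeated tile of
// tensor->grad back onto src0's shape. Tile layout follows the forward pass: column block jc,
// row block ir, within-tile indices (c, r).
static void repeat_back_2d_ref(int nc0, int nr0, int ncr, int nrr,
                               float * dsrc, const float * dgrad) {
    const int nc = nc0*ncr;

    for (int r = 0; r < nr0; r++)
        for (int c = 0; c < nc0; c++)
            dsrc[r*nc0 + c] = 0.0f;

    for (int ir = 0; ir < nrr; ir++)
        for (int jc = 0; jc < ncr; jc++)
            for (int r = 0; r < nr0; r++)
                for (int c = 0; c < nc0; c++)
                    dsrc[r*nc0 + c] += dgrad[(ir*nr0 + r)*nc + jc*nc0 + c];
}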
GGML_ASSERT(false); // TODO: not implemented
} break;
case GGML_OP_SILU:
+ {
+ // necessary for llama
+ if (src0->grad) {
+ src0->grad = ggml_add_impl(ctx,
+ src0->grad,
+ ggml_silu_back(ctx, src0, tensor->grad),
+ inplace);
+ }
+ } break;
+ case GGML_OP_SILU_BACK:
{
GGML_ASSERT(false); // TODO: not implemented
} break;
GGML_ASSERT(false); // TODO: not implemented
} break;
case GGML_OP_RMS_NORM:
+ {
+ // necessary for llama
+ if (src0->grad) {
+ src0->grad = ggml_add_impl(ctx,
+ src0->grad,
+ ggml_rms_norm_back(ctx, src0, tensor->grad),
+ inplace);
+ }
+ } break;
+ case GGML_OP_RMS_NORM_BACK:
{
GGML_ASSERT(false); // TODO: not implemented
} break;
case GGML_OP_MUL_MAT:
{
+ // https://cs231n.github.io/optimization-2/#staged
+ // # forward pass
+ // s0 = np.random.randn(5, 10)
+ // s1 = np.random.randn(10, 3)
+ // t = s0.dot(s1)
+
+ // # now suppose we had the gradient on t from above in the circuit
+ // dt = np.random.randn(*t.shape) # same shape as t
+ // ds0 = dt.dot(s1.T) #.T gives the transpose of the matrix
+ // ds1 = s0.T.dot(dt)
+
+ // tensor.shape [m,p]
+ // src0.shape [n,m]
+ // src1.shape [n,p]
+
+ // necessary for llama
if (src0->grad) {
// TODO: this requires outer product - ggml_out_prod(ctx, src1, tensor->grad);
- GGML_ASSERT(false);
+ src0->grad =
+ ggml_add_impl(ctx,
+ src0->grad,
+ // ds0 = dt.dot(s1.T)
+ // ggml_out_prod(ctx, // [n,m]
+ // src1, // [n,p]
+ // tensor->grad), // [m,p]
+ // for now just using A*B==(B.T*A.T).T
+ ggml_cont(ctx, // [n,m]
+ ggml_transpose(ctx, // [n,m]
+ ggml_mul_mat(ctx, // [m,n]
+ ggml_cont(ctx, // [p,m]
+ ggml_transpose(ctx, // [p,m]
+ tensor->grad)), // [m,p]
+ ggml_cont(ctx, // [p,n]
+ ggml_transpose(ctx, // [p,n]
+ src1))))), // [n,p]
+ inplace);
}
if (src1->grad) {
src1->grad =
ggml_add_impl(ctx,
src1->grad,
- ggml_mul_mat(ctx,
- ggml_cont(ctx, ggml_transpose(ctx, src0)),
- tensor->grad),
+ // ds1 = s0.T.dot(dt):
+ ggml_mul_mat(ctx, // [n,p]
+ ggml_cont(ctx, // [m,n]
+ ggml_transpose(ctx, src0)), // [m,n]
+ tensor->grad), // [m,p]
inplace);
}
} break;
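// [editor sketch, not part of the patch] The chain rule quoted above from cs231n, in plain
// row-major C with conventional shapes t = s0[M][K] . s1[K][N]: ds0 = dt . s1^T and
// ds1 = s0^T . dt. The diff expresses the same thing with ggml_mul_mat plus transposes and
// ggml_cont, because an outer-product op is not available here.
static void matmul_backward_ref(int M, int K, int N,
                                const float * s0, const float * s1, const float * dt,
                                float * ds0, float * ds1) {
    // ds0[i][k] = sum_j dt[i][j] * s1[k][j]   (dt . s1^T)
    for (int i = 0; i < M; i++) {
        for (int k = 0; k < K; k++) {
            float acc = 0.0f;
            for (int j = 0; j < N; j++) acc += dt[i*N + j]*s1[k*N + j];
            ds0[i*K + k] = acc;
        }
    }
    // ds1[k][j] = sum_i s0[i][k] * dt[i][j]   (s0^T . dt)
    for (int k = 0; k < K; k++) {
        for (int j = 0; j < N; j++) {
            float acc = 0.0f;
            for (int i = 0; i < M; i++) acc += s0[i*K + k]*dt[i*N + j];
            ds1[k*N + j] = acc;
        }
    }
}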
case GGML_OP_SCALE:
{
- GGML_ASSERT(false); // TODO: not implemented
+ // necessary for llama
+ if (src0->grad) {
+ src0->grad =
+ ggml_add_impl(ctx,
+ src0->grad,
+ ggml_scale_impl(ctx, tensor->grad, src1, false),
+ inplace);
+ }
+ if (src1->grad) {
+ src1->grad =
+ ggml_add_impl(ctx,
+ src1->grad,
+ ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)),
+ inplace);
+ }
+ } break;
+ case GGML_OP_SET:
+ {
+ GGML_ASSERT(ggml_nelements(tensor->opt[0]) == 5);
+ GGML_ASSERT(tensor->opt[0]->type == GGML_TYPE_I32);
+ const size_t nb1 = (( int32_t * ) tensor->opt[0]->data)[0];
+ const size_t nb2 = (( int32_t * ) tensor->opt[0]->data)[1];
+ const size_t nb3 = (( int32_t * ) tensor->opt[0]->data)[2];
+ const size_t offset = (( int32_t * ) tensor->opt[0]->data)[3];
+
+ struct ggml_tensor * tensor_grad_view = NULL;
+
+ if (src0->grad || src1->grad) {
+ GGML_ASSERT(src0->type == tensor->type);
+ GGML_ASSERT(tensor->grad->type == tensor->type);
+ GGML_ASSERT(tensor->grad->type == src1->grad->type);
+
+ tensor_grad_view = ggml_view_4d(ctx,
+ tensor->grad,
+ src1->grad->ne[0],
+ src1->grad->ne[1],
+ src1->grad->ne[2],
+ src1->grad->ne[3],
+ nb1, nb2, nb3, offset);
+ }
+
+ if (src0->grad) {
+ src0->grad = ggml_add_impl(ctx,
+ src0->grad,
+ ggml_acc_impl(ctx,
+ tensor->grad,
+ ggml_neg(ctx, tensor_grad_view),
+ nb1, nb2, nb3, offset, false),
+ inplace);
+ }
+
+ if (src1->grad) {
+ src1->grad =
+ ggml_add_impl(ctx,
+ src1->grad,
+ ggml_reshape(ctx,
+ ggml_cont(ctx, tensor_grad_view),
+ src1->grad),
+ inplace);
+ }
} break;
case GGML_OP_CPY:
{
- GGML_ASSERT(false); // TODO: not implemented
+ // necessary for llama
+ // cpy overwrites the value of src1 with src0 and returns view(src1)
+ // the overwriting is mathematically equivalent to:
+ // tensor = src0 * 1 + src1 * 0
+ if (src0->grad) {
+ // dsrc0 = dtensor * 1
+ src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
+ }
+ if (src1->grad) {
+ // dsrc1 = dtensor * 0 -> noop
+ }
} break;
case GGML_OP_CONT:
{
- GGML_ASSERT(false); // TODO: not implemented
+ // same as cpy
+ if (src0->grad) {
+ GGML_ASSERT(ggml_is_contiguous(src0->grad));
+ GGML_ASSERT(ggml_is_contiguous(tensor->grad));
+ src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
+ }
} break;
case GGML_OP_RESHAPE:
{
- GGML_ASSERT(false); // TODO: not implemented
+ // necessary for llama
+ if (src0->grad) {
+ src0->grad =
+ ggml_add_impl(ctx, src0->grad,
+ ggml_reshape(ctx, tensor->grad, src0->grad),
+ inplace);
+ }
} break;
case GGML_OP_VIEW:
{
- GGML_ASSERT(false); // not supported
+ // necessary for llama
+ if (src0->grad) {
+ size_t offset;
+ memcpy(&offset, tensor->padding, sizeof(offset));
+
+ size_t nb1 = tensor->nb[1];
+ size_t nb2 = tensor->nb[2];
+ size_t nb3 = tensor->nb[3];
+
+ if (src0->type != src0->grad->type) {
+ // gradient is typically F32, but src0 could be other type
+ size_t ng = ggml_element_size(src0->grad);
+ size_t n0 = ggml_element_size(src0);
+ GGML_ASSERT(offset % n0 == 0);
+ GGML_ASSERT(nb1 % n0 == 0);
+ GGML_ASSERT(nb2 % n0 == 0);
+ GGML_ASSERT(nb3 % n0 == 0);
+ offset = (offset / n0) * ng;
+ nb1 = (nb1 / n0) * ng;
+ nb2 = (nb2 / n0) * ng;
+ nb3 = (nb3 / n0) * ng;
+ }
+
+ src0->grad = ggml_acc_impl(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, inplace);
+ }
} break;
case GGML_OP_PERMUTE:
{
- GGML_ASSERT(false); // TODO: not implemented
+ // necessary for llama
+ if (src0->grad) {
+ int axis0 = tensor->padding[0] & 0x3;
+ int axis1 = tensor->padding[1] & 0x3;
+ int axis2 = tensor->padding[2] & 0x3;
+ int axis3 = tensor->padding[3] & 0x3;
+ int axes_backward[4] = {0,0,0,0};
+ axes_backward[axis0] = 0;
+ axes_backward[axis1] = 1;
+ axes_backward[axis2] = 2;
+ axes_backward[axis3] = 3;
+ src0->grad =
+ ggml_add_impl(ctx, src0->grad,
+ ggml_permute(ctx,
+ tensor->grad,
+ axes_backward[0],
+ axes_backward[1],
+ axes_backward[2],
+ axes_backward[3]),
+ inplace);
+ }
} break;
case GGML_OP_TRANSPOSE:
{
- GGML_ASSERT(false); // TODO: not implemented
+ // necessary for llama
+ if (src0->grad) {
+ src0->grad =
+ ggml_add_impl(ctx, src0->grad,
+ ggml_transpose(ctx, tensor->grad),
+ inplace);
+ }
} break;
case GGML_OP_GET_ROWS:
+ {
+ // necessary for llama (only for tokenizer)
+ if (src0->grad) {
+ src0->grad =
+ ggml_add_impl(ctx, src0->grad,
+ ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
+ inplace);
+ }
+ if (src1->grad) {
+ // noop
+ }
+ } break;
+ case GGML_OP_GET_ROWS_BACK:
{
GGML_ASSERT(false); // TODO: not implemented
} break;
- case GGML_OP_DIAG_MASK_INF:
+ case GGML_OP_DIAG:
{
GGML_ASSERT(false); // TODO: not implemented
} break;
+ case GGML_OP_DIAG_MASK_INF:
+ {
+ // necessary for llama
+ if (src0->grad) {
+ assert(src1->type == GGML_TYPE_I32);
+ assert(ggml_nelements(src1) == 2);
+ const int n_past = ((int32_t *) src1->data)[0];
+ src0->grad =
+ ggml_add_impl(ctx, src0->grad,
+ ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
+ inplace);
+ }
+ if (src1->grad) {
+ // noop
+ }
+ } break;
+ case GGML_OP_DIAG_MASK_ZERO:
+ {
+ // necessary for llama
+ if (src0->grad) {
+ assert(src1->type == GGML_TYPE_I32);
+ assert(ggml_nelements(src1) == 2);
+ const int n_past = ((int32_t *) src1->data)[0];
+ src0->grad =
+ ggml_add_impl(ctx, src0->grad,
+ ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
+ inplace);
+ }
+ if (src1->grad) {
+ // noop
+ }
+ } break;
case GGML_OP_SOFT_MAX:
{
- GGML_ASSERT(false); // TODO: not implemented
+ // necessary for llama
+ if (src0->grad) {
+ // y = softmax(x)
+ //
+ // Jii = yi - yi*yi
+ // Jij = -yi*yj
+ // J = diag(y)-y.*y
+ // dx = J * dy
+ // dxk = sum(Jkj * dyk)
+
+ int64_t ne2[4] = {
+ tensor->ne[0],
+ 1,
+ tensor->ne[1]*tensor->ne[2],
+ tensor->ne[3]
+ };
+ struct ggml_tensor * tensor2 = ggml_cont(ctx,
+ ggml_reshape_4d(ctx,
+ ggml_cont(ctx, tensor),
+ ne2[0], ne2[1], ne2[2], ne2[3]));
+
+ struct ggml_tensor * grad2 = ggml_cont(ctx,
+ ggml_reshape_4d(ctx,
+ ggml_cont(ctx, tensor->grad),
+ ne2[0], ne2[1], ne2[2], ne2[3]));
+
+ struct ggml_tensor * tensor2_t = ggml_cont(ctx, // [1,ne0,ne1*ne2,ne3]
+ ggml_permute(ctx, // [1,ne0,ne1*ne2,ne3]
+ tensor2, // [ne0,1,ne1*ne2,ne3]
+ 1, 0, 2, 3));
+
+ src0->grad =
+ ggml_add_impl(ctx,
+ src0->grad, // [ne0,ne1,ne2,ne3]
+ ggml_reshape(ctx, // [ne0,ne1,ne2,ne3]
+ ggml_mul_mat(ctx, // [ne0,1,ne1*ne2,ne3]
+ ggml_sub(ctx, // [ne0,ne0,ne1*ne2,ne3]
+ ggml_diag(ctx, // [ne0,ne0,ne1*ne2,ne3]
+ tensor2), // [ne0,1,ne1*ne2,ne3]
+ ggml_mul_mat(ctx, // [ne0,ne0,ne1*ne2,ne3]
+ tensor2_t, // [1,ne0,ne1*ne2,ne3]
+ tensor2_t)), // [1,ne0,ne1*ne2,ne3]
+ grad2), // [ne0,1,ne1*ne2,ne3]
+ src0->grad),
+ inplace);
+ }
} break;
case GGML_OP_ROPE:
{
- GGML_ASSERT(false); // TODO: not implemented
+ // necessary for llama
+ if (src0->grad) {
+ assert(src1->type == GGML_TYPE_I32);
+ assert(ggml_nelements(src1) == 3);
+ const int n_past = ((int32_t *) src1->data)[0];
+ const int n_dims = ((int32_t *) src1->data)[1];
+ const int mode = ((int32_t *) src1->data)[2];
+ src0->grad = ggml_add_impl(ctx,
+ src0->grad,
+ ggml_rope_back(ctx,
+ tensor->grad,
+ n_past,
+ n_dims,
+ mode),
+ inplace);
+ }
+ if (src1->grad) {
+ // noop
+ }
+ } break;
+ case GGML_OP_ROPE_BACK:
+ {
+ if (src0->grad) {
+ assert(src1->type == GGML_TYPE_I32);
+ assert(ggml_nelements(src1) == 3);
+ const int n_past = ((int32_t *) src1->data)[0];
+ const int n_dims = ((int32_t *) src1->data)[1];
+ const int mode = ((int32_t *) src1->data)[2];
+ src0->grad = ggml_add_impl(ctx,
+ src0->grad,
+ ggml_rope(ctx,
+ tensor->grad,
+ n_past,
+ n_dims,
+ mode),
+ inplace);
+ }
+ if (src1->grad) {
+ // noop
+ }
} break;
case GGML_OP_CONV_1D_1S:
{
#define ggml_lock_init(x) UNUSED(x)
#define ggml_lock_destroy(x) UNUSED(x)
+#if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
+#define ggml_lock_lock(x) _mm_pause()
+#else
#define ggml_lock_lock(x) UNUSED(x)
+#endif
#define ggml_lock_unlock(x) UNUSED(x)
#define GGML_LOCK_INITIALIZER 0
work_size = MAX(work_size, cur);
} break;
case GGML_OP_ADD:
+ case GGML_OP_ADD1:
{
node->n_tasks = n_threads;
cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src0->ne[0] * n_threads;
}
+ work_size = MAX(work_size, cur);
+ } break;
+ case GGML_OP_ACC:
+ {
+ node->n_tasks = n_threads;
+
+ size_t cur = 0;
+
+ if (ggml_is_quantized(node->src0->type)) {
+ cur = GGML_TYPE_SIZE[GGML_TYPE_F32] * node->src1->ne[0] * n_threads;
+ }
+
work_size = MAX(work_size, cur);
} break;
case GGML_OP_SUB:
- case GGML_OP_MUL:
case GGML_OP_DIV:
case GGML_OP_SQR:
case GGML_OP_SQRT:
+ case GGML_OP_LOG:
case GGML_OP_SUM:
+ case GGML_OP_SUM_ROWS:
case GGML_OP_MEAN:
case GGML_OP_REPEAT:
case GGML_OP_ABS:
{
node->n_tasks = 1;
} break;
+ case GGML_OP_MUL:
case GGML_OP_GELU:
- {
- node->n_tasks = n_threads;
- } break;
case GGML_OP_SILU:
- {
- node->n_tasks = n_threads;
- } break;
+ case GGML_OP_SILU_BACK:
case GGML_OP_NORM:
case GGML_OP_RMS_NORM:
+ case GGML_OP_RMS_NORM_BACK:
{
node->n_tasks = n_threads;
} break;
{
node->n_tasks = n_threads;
} break;
+ case GGML_OP_SET:
case GGML_OP_CONT:
case GGML_OP_RESHAPE:
case GGML_OP_VIEW:
case GGML_OP_PERMUTE:
case GGML_OP_TRANSPOSE:
case GGML_OP_GET_ROWS:
- case GGML_OP_DIAG_MASK_INF:
+ case GGML_OP_GET_ROWS_BACK:
+ case GGML_OP_DIAG:
+ case GGML_OP_DIAG_MASK_ZERO:
{
node->n_tasks = 1;
} break;
+ case GGML_OP_DIAG_MASK_INF:
case GGML_OP_SOFT_MAX:
- {
- node->n_tasks = n_threads;
- } break;
case GGML_OP_ROPE:
+ case GGML_OP_ROPE_BACK:
{
node->n_tasks = n_threads;
} break;
// build forward + backward compute graphs
struct ggml_cgraph gf = ggml_build_forward (f);
- struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);
+ struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, true);
switch (params.type) {
case GGML_OPT_ADAM:
assert(k % QK4_0 == 0);
const int nb = k / QK4_0;
- for (int j = 0; j < n; j += k) {
- block_q4_0 * restrict y = (block_q4_0 *)dst + j/QK4_0;
+ for (int b = 0; b < n; b += k) {
+ block_q4_0 * restrict y = (block_q4_0 *) dst + b/QK4_0;
- quantize_row_q4_0_reference(src + j, y, k);
+ quantize_row_q4_0_reference(src + b, y, k);
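+ // hist accumulates how often each of the 16 possible 4-bit quant values occurs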
for (int i = 0; i < nb; i++) {
- for (int l = 0; l < QK4_0; l += 2) {
- const uint8_t vi0 = y[i].qs[l/2] & 0x0F;
- const uint8_t vi1 = y[i].qs[l/2] >> 4;
+ for (int j = 0; j < QK4_0; j += 2) {
+ const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
+ const uint8_t vi1 = y[i].qs[j/2] >> 4;
hist[vi0]++;
hist[vi1]++;
assert(k % QK4_1 == 0);
const int nb = k / QK4_1;
- for (int j = 0; j < n; j += k) {
- block_q4_1 * restrict y = (block_q4_1 *)dst + j/QK4_1;
+ for (int b = 0; b < n; b += k) {
+ block_q4_1 * restrict y = (block_q4_1 *) dst + b/QK4_1;
- quantize_row_q4_1_reference(src + j, y, k);
+ quantize_row_q4_1_reference(src + b, y, k);
for (int i = 0; i < nb; i++) {
- for (int l = 0; l < QK4_1; l += 2) {
- const uint8_t vi0 = y[i].qs[l/2] & 0x0F;
- const uint8_t vi1 = y[i].qs[l/2] >> 4;
+ for (int j = 0; j < QK4_1; j += 2) {
+ const uint8_t vi0 = y[i].qs[j/2] & 0x0F;
+ const uint8_t vi1 = y[i].qs[j/2] >> 4;
hist[vi0]++;
hist[vi1]++;
return (n/QK4_1*sizeof(block_q4_1));
}
-size_t ggml_quantize_q4_2(const float * src, void * dst, int n, int k, int64_t * hist) {
- assert(k % QK4_2 == 0);
- const int nb = k / QK4_2;
-
- for (int j = 0; j < n; j += k) {
- block_q4_2 * restrict y = (block_q4_2 *)dst + j/QK4_2;
-
- quantize_row_q4_2_reference(src + j, y, k);
-
- for (int i = 0; i < nb; i++) {
- for (int l = 0; l < QK4_2; l += 2) {
- const uint8_t vi0 = y[i].qs[l/2] & 0x0F;
- const uint8_t vi1 = y[i].qs[l/2] >> 4;
-
- hist[vi0]++;
- hist[vi1]++;
- }
- }
- }
-
- return (n/QK4_2*sizeof(block_q4_2));
-}
-
size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist) {
assert(k % QK5_0 == 0);
const int nb = k / QK5_0;
- for (int j = 0; j < n; j += k) {
- block_q5_0 * restrict y = (block_q5_0 *)dst + j/QK5_0;
+ for (int b = 0; b < n; b += k) {
+ block_q5_0 * restrict y = (block_q5_0 *)dst + b/QK5_0;
- quantize_row_q5_0_reference(src + j, y, k);
+ quantize_row_q5_0_reference(src + b, y, k);
for (int i = 0; i < nb; i++) {
uint32_t qh;
memcpy(&qh, &y[i].qh, sizeof(qh));
- for (int l = 0; l < QK5_0; l += 2) {
- const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
- const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
+ for (int j = 0; j < QK5_0; j += 2) {
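+ // assumed layout: qs[j/2] packs elements j/2 and j/2+16 in its low/high nibble,
+ // and qh bits j/2 and j/2+16 carry their 5th bits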
+ const uint8_t vh0 = ((qh & (1u << (j/2 + 0 ))) >> (j/2 + 0 )) << 4;
+ const uint8_t vh1 = ((qh & (1u << (j/2 + 16))) >> (j/2 + 12));
// cast the 5-bit values down to 4 bits so they index the 16 histogram bins
- const uint8_t vi0 = ((y[i].qs[l/2] & 0x0F) | vh0) / 2;
- const uint8_t vi1 = ((y[i].qs[l/2] >> 4) | vh1) / 2;
+ const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
+ const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
hist[vi0]++;
hist[vi1]++;
assert(k % QK5_1 == 0);
const int nb = k / QK5_1;
- for (int j = 0; j < n; j += k) {
- block_q5_1 * restrict y = (block_q5_1 *)dst + j/QK5_1;
+ for (int b = 0; b < n; b += k) {
+ block_q5_1 * restrict y = (block_q5_1 *)dst + b/QK5_1;
- quantize_row_q5_1_reference(src + j, y, k);
+ quantize_row_q5_1_reference(src + b, y, k);
for (int i = 0; i < nb; i++) {
uint32_t qh;
memcpy(&qh, &y[i].qh, sizeof(qh));
- for (int l = 0; l < QK5_1; l += 2) {
- const uint8_t vh0 = ((qh & (1u << (l + 0))) >> (l + 0)) << 4;
- const uint8_t vh1 = ((qh & (1u << (l + 1))) >> (l + 1)) << 4;
+ for (int j = 0; j < QK5_1; j += 2) {
+ const uint8_t vh0 = ((qh & (1u << (j/2 + 0 ))) >> (j/2 + 0 )) << 4;
+ const uint8_t vh1 = ((qh & (1u << (j/2 + 16))) >> (j/2 + 12));
// cast the 5-bit values down to 4 bits so they index the 16 histogram bins
- const uint8_t vi0 = ((y[i].qs[l/2] & 0x0F) | vh0) / 2;
- const uint8_t vi1 = ((y[i].qs[l/2] >> 4) | vh1) / 2;
+ const uint8_t vi0 = ((y[i].qs[j/2] & 0x0F) | vh0) / 2;
+ const uint8_t vi1 = ((y[i].qs[j/2] >> 4) | vh1) / 2;
hist[vi0]++;
hist[vi1]++;
assert(k % QK8_0 == 0);
const int nb = k / QK8_0;
- for (int j = 0; j < n; j += k) {
- block_q8_0 * restrict y = (block_q8_0 *)dst + j/QK8_0;
+ for (int b = 0; b < n; b += k) {
+ block_q8_0 * restrict y = (block_q8_0 *)dst + b/QK8_0;
- quantize_row_q8_0_reference(src + j, y, k);
+ quantize_row_q8_0_reference(src + b, y, k);
for (int i = 0; i < nb; i++) {
- for (int l = 0; l < QK8_0; ++l) {
- const int8_t vi = y[i].qs[l];
+ for (int j = 0; j < QK8_0; ++j) {
+ const int8_t vi = y[i].qs[j];
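+ // vi is signed, so vi/16 + 8 folds the value into one of 16 coarse bins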
hist[vi/16 + 8]++;
}
block_q4_1 * block = (block_q4_1*)dst + start / QK4_1;
result = ggml_quantize_q4_1(src + start, block, n, n, hist);
} break;
- case GGML_TYPE_Q4_2:
- {
- GGML_ASSERT(start % QK4_2 == 0);
- block_q4_2 * block = (block_q4_2*)dst + start / QK4_2;
- result = ggml_quantize_q4_2(src + start, block, n, n, hist);
- } break;
case GGML_TYPE_Q5_0:
{
GGML_ASSERT(start % QK5_0 == 0);