--- /dev/null
+++ b/llama.cpp
+#include "llama.h"
+
+#include "ggml.h"
+
+#include <cinttypes>
+#include <fstream>
+#include <random>
+#include <map>
+#include <unordered_map>
+#include <queue>
+#include <regex>
+#include <cassert>
+#include <cstring>
+
+#define LLAMA_USE_SCRATCH
+#define LLAMA_MAX_SCRATCH_BUFFERS 16
+
+#define LLAMA_ASSERT(x) \
+ do { \
+ if (!(x)) { \
+ fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
+ abort(); \
+ } \
+ } while (0)
+
+
+// determine the number of model parts based on the embedding dimension (n_embd)
+static const std::unordered_map<int, int> LLAMA_N_PARTS = {
+ { 4096, 1 },
+ { 5120, 2 },
+ { 6656, 4 },
+ { 8192, 8 },
+};
+
+// available llama models
+enum e_model {
+ MODEL_UNKNOWN,
+ MODEL_7B,
+ MODEL_13B,
+ MODEL_30B,
+ MODEL_65B,
+};
+
+static const size_t MB = 1024*1024;
+
+// computed for n_ctx == 2048
+// TODO: dynamically determine these sizes
+// needs modifications in ggml
+
+static const std::map<e_model, size_t> MEM_REQ_SCRATCH0 = {
+ { MODEL_7B, 512ull*MB },
+ { MODEL_13B, 512ull*MB },
+ { MODEL_30B, 512ull*MB },
+ { MODEL_65B, 512ull*MB },
+};
+
+static const std::map<e_model, size_t> MEM_REQ_SCRATCH1 = {
+ { MODEL_7B, 512ull*MB },
+ { MODEL_13B, 512ull*MB },
+ { MODEL_30B, 512ull*MB },
+ { MODEL_65B, 512ull*MB },
+};
+
+// 2*n_embd*n_ctx*n_layer*sizeof(float16)
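+// e.g. 7B (n_embd = 4096, n_layer = 32): 2*4096*2048*32*2 bytes = 1024 MB, plus ~2 MB of ggml object overhead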
+static const std::map<e_model, size_t> MEM_REQ_KV_SELF = {
+ { MODEL_7B, 1026ull*MB },
+ { MODEL_13B, 1608ull*MB },
+ { MODEL_30B, 3124ull*MB },
+ { MODEL_65B, 5120ull*MB },
+};
+
+// this is mostly needed for temporary mul_mat buffers to dequantize the data
+// not actually needed if BLAS is disabled
+static const std::map<e_model, size_t> MEM_REQ_EVAL = {
+ { MODEL_7B, 768ull*MB },
+ { MODEL_13B, 1024ull*MB },
+ { MODEL_30B, 1280ull*MB },
+ { MODEL_65B, 1536ull*MB },
+};
+
+// default hparams (LLaMA 7B)
+struct llama_hparams {
+ int32_t n_vocab = 32000;
+ int32_t n_ctx = 512; // context size; set from user input at load time, not read from the model file
+ int32_t n_embd = 4096;
+ int32_t n_mult = 256;
+ int32_t n_head = 32;
+ int32_t n_layer = 32;
+ int32_t n_rot = 64;
+ int32_t f16 = 1; // weight type: 0 = f32, 1 = f16, 2 = q4_0, 3 = q4_1, 4 = q4_1 per-layer weights with f16 for the rest (GPTQ)
+};
+
+struct llama_layer {
+ // normalization
+ struct ggml_tensor * attention_norm;
+
+ // attention
+ struct ggml_tensor * wq;
+ struct ggml_tensor * wk;
+ struct ggml_tensor * wv;
+ struct ggml_tensor * wo;
+
+ // normalization
+ struct ggml_tensor * ffn_norm;
+
+ // ff
+ struct ggml_tensor * w1;
+ struct ggml_tensor * w2;
+ struct ggml_tensor * w3;
+};
+
+struct llama_kv_cache {
+ struct ggml_tensor * k;
+ struct ggml_tensor * v;
+
+ struct ggml_context * ctx;
+
+ std::vector<uint8_t> buf;
+
+ int n; // number of tokens currently in the cache
+};
+
+struct llama_model {
+ e_model type = MODEL_UNKNOWN;
+
+ llama_hparams hparams;
+
+ struct ggml_tensor * tok_embeddings;
+
+ struct ggml_tensor * norm;
+ struct ggml_tensor * output;
+
+ std::vector<llama_layer> layers;
+
+ // context
+ struct ggml_context * ctx;
+
+ // key + value cache for the self attention
+ // TODO: move to llama_state
+ struct llama_kv_cache kv_self;
+
+ // the model memory buffer
+ std::vector<uint8_t> buf;
+
+ // tensors
+ int n_loaded;
+ std::unordered_map<std::string, struct ggml_tensor *> tensors;
+};
+
+struct llama_vocab {
+ using id = int32_t;
+ using token = std::string;
+
+ struct token_score {
+ token tok;
+ float score;
+ };
+
+ std::unordered_map<token, id> token_to_id;
+ std::vector<token_score> id_to_token;
+};
+
+struct llama_context {
+ std::mt19937 rng;
+
+ int64_t t_load_us = 0;
+ int64_t t_start_us = 0;
+
+ int64_t t_sample_us = 0;
+ int64_t t_eval_us = 0;
+ int64_t t_p_eval_us = 0;
+
+ int32_t n_sample = 0; // number of tokens sampled
+ int32_t n_eval = 0; // number of eval calls
+ int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
+
+ llama_model model;
+ llama_vocab vocab;
+
+ size_t mem_per_token = 0;
+
+ // decode output (2-dimensional array: [n_tokens][n_vocab])
+ std::vector<float> logits;
+ bool logits_all = false;
+
+ // input embedding (1-dimensional array: [n_embd])
+ std::vector<float> embedding;
+
+ // memory buffers used to evaluate the model
+ // TODO: move in llama_state
+ std::vector<uint8_t> buf_compute;
+ std::vector<uint8_t> buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS];
+
+ int buf_last = 0;
+ size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };
+
+ void use_buf(struct ggml_context * ctx, int i) {
+#if defined(LLAMA_USE_SCRATCH)
+ size_t last_size = 0;
+
+ if (i == -1) {
+ last_size = ggml_set_scratch(ctx, { 0, 0, nullptr, });
+ } else {
+ auto & buf = buf_scratch[i];
+ last_size = ggml_set_scratch(ctx, { 0, buf.size(), buf.data(), });
+ }
+
+ if (buf_last >= 0) {
+ buf_max_size[buf_last] = std::max(buf_max_size[buf_last], last_size);
+ }
+
+ buf_last = i;
+#else
+ (void) i;
+ (void) ctx;
+#endif
+ }
+
+ size_t get_buf_max_mem(int i) const {
+#if defined(LLAMA_USE_SCRATCH)
+ return buf_max_size[i];
+#else
+ (void) i;
+ return 0;
+#endif
+ }
+};
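+
+// typical scratch buffer usage during evaluation (see llama_eval_internal):
+//
+//   lctx.use_buf(ctx0, 0);  // subsequent tensors are allocated in scratch buffer 0
+//   ...
+//   lctx.use_buf(ctx0, 1);  // switch to scratch buffer 1
+//   ...
+//   lctx.use_buf(ctx0, -1); // revert to the regular allocator for the final outputs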
+
+//
+// kv cache
+//
+
+static bool kv_cache_init(
+ const struct llama_hparams & hparams,
+ struct llama_kv_cache & cache,
+ ggml_type wtype,
+ int n_ctx) {
+ const int n_embd = hparams.n_embd;
+ const int n_layer = hparams.n_layer;
+
+ const int n_mem = n_layer*n_ctx;
+ const int n_elements = n_embd*n_mem;
+
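+ // one K and one V tensor of n_elements each, plus 2 MB of headroom for the ggml object overhead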
+ cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
+
+ struct ggml_init_params params;
+ params.mem_size = cache.buf.size();
+ params.mem_buffer = cache.buf.data();
+
+ cache.ctx = ggml_init(params);
+
+ if (!cache.ctx) {
+ fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
+ return false;
+ }
+
+ cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
+ cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
+
+ return true;
+}
+
+static void kv_cache_free(struct llama_kv_cache & cache) {
+ if (cache.ctx) {
+ ggml_free(cache.ctx);
+ cache.ctx = nullptr;
+ }
+}
+
+struct llama_context_params llama_context_default_params() {
+ struct llama_context_params result = {
+ /*.n_ctx =*/ 512,
+ /*.n_parts =*/ -1,
+ /*.seed =*/ 0,
+ /*.f16_kv =*/ false,
+ /*.logits_all =*/ false,
+ /*.vocab_only =*/ false,
+ /*.use_mlock =*/ false,
+ /*.embedding =*/ false,
+ /*.progress_callback =*/ nullptr,
+ /*.progress_callback_user_data =*/ nullptr,
+ };
+
+ return result;
+}
+
+//
+// model loading
+//
+
+static bool llama_model_load(
+ const std::string & fname,
+ llama_context & lctx,
+ int n_ctx,
+ int n_parts,
+ ggml_type memory_type,
+ bool vocab_only,
+ llama_progress_callback progress_callback,
+ void *progress_callback_user_data) {
+ fprintf(stderr, "%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());
+
+ const int64_t t_start_us = ggml_time_us();
+
+ lctx.t_start_us = t_start_us;
+
+ std::vector<char> f_buf(1024*1024);
+
+ auto & model = lctx.model;
+ auto & vocab = lctx.vocab;
+
+ auto fin = std::ifstream(fname, std::ios::binary);
+ fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
+ if (!fin) {
+ fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
+ return false;
+ }
+
+ // verify magic
+ {
+ uint32_t magic;
+ fin.read((char *) &magic, sizeof(magic));
+ if (magic == LLAMA_FILE_MAGIC_UNVERSIONED) {
+ fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
+ __func__, fname.c_str());
+ return false;
+ }
+ if (magic != LLAMA_FILE_MAGIC) {
+ fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
+ return false;
+ }
+
+ uint32_t format_version;
+ fin.read((char *) &format_version, sizeof(format_version));
+
+ if (format_version != LLAMA_FILE_VERSION) {
+ fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ", expected %d)\n",
+ __func__, fname.c_str(), format_version, LLAMA_FILE_VERSION);
+ return false;
+ }
+ }
+
+ int n_ff = 0;
+
+ // load hparams
+ {
+ auto & hparams = model.hparams;
+
+ fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
+ //fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
+ fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd));
+ fin.read((char *) &hparams.n_mult, sizeof(hparams.n_mult));
+ fin.read((char *) &hparams.n_head, sizeof(hparams.n_head));
+ fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
+ fin.read((char *) &hparams.n_rot, sizeof(hparams.n_rot));
+ fin.read((char *) &hparams.f16, sizeof(hparams.f16));
+
+ hparams.n_ctx = n_ctx;
+
+ n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
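+ // e.g. 7B: ((2*(4*4096)/3 + 256 - 1)/256)*256 = 11008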
+
+ if (n_parts < 1) {
+ n_parts = LLAMA_N_PARTS.at(hparams.n_embd);
+ }
+
+ // temp warning to tell the user to use "--n_parts"
+ if (hparams.f16 == 4 && n_parts != 1) {
+ fprintf(stderr, "%s: GPTQ model detected - are you sure n_parts should be %d? we normally expect it to be 1\n", __func__, n_parts);
+ fprintf(stderr, "%s: use '--n_parts 1' if necessary\n", __func__);
+ }
+
+ if (hparams.n_layer == 32) {
+ model.type = e_model::MODEL_7B;
+ }
+
+ if (hparams.n_layer == 40) {
+ model.type = e_model::MODEL_13B;
+ }
+
+ if (hparams.n_layer == 60) {
+ model.type = e_model::MODEL_30B;
+ }
+
+ if (hparams.n_layer == 80) {
+ model.type = e_model::MODEL_65B;
+ }
+
+ fprintf(stderr, "%s: n_vocab = %d\n", __func__, hparams.n_vocab);
+ fprintf(stderr, "%s: n_ctx = %d\n", __func__, hparams.n_ctx);
+ fprintf(stderr, "%s: n_embd = %d\n", __func__, hparams.n_embd);
+ fprintf(stderr, "%s: n_mult = %d\n", __func__, hparams.n_mult);
+ fprintf(stderr, "%s: n_head = %d\n", __func__, hparams.n_head);
+ fprintf(stderr, "%s: n_layer = %d\n", __func__, hparams.n_layer);
+ fprintf(stderr, "%s: n_rot = %d\n", __func__, hparams.n_rot);
+ fprintf(stderr, "%s: f16 = %d\n", __func__, hparams.f16);
+ fprintf(stderr, "%s: n_ff = %d\n", __func__, n_ff);
+ fprintf(stderr, "%s: n_parts = %d\n", __func__, n_parts);
+ fprintf(stderr, "%s: type = %d\n", __func__, model.type);
+ }
+
+ // load vocab
+ {
+ std::string word;
+ vocab.id_to_token.resize(model.hparams.n_vocab);
+ std::vector<char> tmp(64);
+
+ for (int i = 0; i < model.hparams.n_vocab; i++) {
+ uint32_t len;
+ fin.read((char *) &len, sizeof(len));
+
+ word.resize(len);
+ if (len > 0) {
+ tmp.resize(len);
+ fin.read(tmp.data(), len);
+ word.assign(tmp.data(), len);
+ } else {
+ word.clear();
+ }
+
+ float score;
+ fin.read((char *) &score, sizeof(score));
+
+ vocab.token_to_id[word] = i;
+
+ auto &tok_score = vocab.id_to_token[i];
+ tok_score.tok = word;
+ tok_score.score = score;
+ }
+ }
+
+ if (vocab_only) {
+ return true;
+ }
+
+ // for the big tensors, we have the option to store the data in 16-bit floats or quantized
+ // in order to save memory and also to speed up the computation
+ // wtype is for per-layer weights, while vtype is for other weights
+ ggml_type wtype, vtype;
+ switch (model.hparams.f16) {
+ case 0: wtype = vtype = GGML_TYPE_F32; break;
+ case 1: wtype = vtype = GGML_TYPE_F16; break;
+ case 2: wtype = vtype = GGML_TYPE_Q4_0; break;
+ case 3: wtype = vtype = GGML_TYPE_Q4_1; break;
+ case 4: wtype = GGML_TYPE_Q4_1; vtype = GGML_TYPE_F16; break;
+ default:
+ {
+ fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n",
+ __func__, fname.c_str(), model.hparams.f16);
+ return false;
+ }
+ }
+
+ auto & ctx = model.ctx;
+
+ size_t ctx_size = 0;
+
+ {
+ const auto & hparams = model.hparams;
+
+ const int n_embd = hparams.n_embd;
+ const int n_layer = hparams.n_layer;
+ const int n_ctx = hparams.n_ctx;
+ const int n_vocab = hparams.n_vocab;
+
+ ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // tok_embeddings
+
+ ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // norm
+
+ ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // output
+
+ ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // attention_norm
+
+ ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wq
+ ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wk
+ ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wv
+ ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wo
+
+ ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ffn_norm
+
+ ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w1
+ ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2
+ ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3
+
+ ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
+ ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
+
+ ctx_size += (5 + 10*n_layer)*256; // object overhead
+
+ fprintf(stderr, "%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
+ }
+
+ // print memory requirements
+ {
+ const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1; // MEM_REQ_KV_SELF is computed for f16, so an f32 kv cache needs twice as much
+
+ // this is the total memory required to run the inference
+ const size_t mem_required =
+ ctx_size +
+ MEM_REQ_SCRATCH0.at(model.type) +
+ MEM_REQ_SCRATCH1.at(model.type) +
+ MEM_REQ_EVAL.at (model.type);
+
+ // this is the memory required by one llama_state
+ const size_t mem_required_state =
+ scale*MEM_REQ_KV_SELF.at(model.type);
+
+ fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
+ mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
+ }
+
+ // create the ggml context
+ {
+ lctx.model.buf.resize(ctx_size);
+
+ struct ggml_init_params params = {
+ /*.mem_size =*/ lctx.model.buf.size(),
+ /*.mem_buffer =*/ lctx.model.buf.data(),
+ };
+
+ model.ctx = ggml_init(params);
+ if (!model.ctx) {
+ fprintf(stderr, "%s: ggml_init() failed\n", __func__);
+ return false;
+ }
+ }
+
+ // prepare memory for the weights
+ {
+ const auto & hparams = model.hparams;
+
+ const int n_embd = hparams.n_embd;
+ const int n_layer = hparams.n_layer;
+ const int n_vocab = hparams.n_vocab;
+
+ model.layers.resize(n_layer);
+
+ model.tok_embeddings = ggml_new_tensor_2d(ctx, vtype, n_embd, n_vocab);
+
+ model.norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
+ model.output = ggml_new_tensor_2d(ctx, vtype, n_embd, n_vocab);
+
+ // map by name
+ model.tensors["tok_embeddings.weight"] = model.tok_embeddings;
+
+ model.tensors["norm.weight"] = model.norm;
+ model.tensors["output.weight"] = model.output;
+
+ for (int i = 0; i < n_layer; ++i) {
+ auto & layer = model.layers[i];
+
+ layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
+
+ layer.wq = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
+ layer.wk = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
+ layer.wv = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
+ layer.wo = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
+
+ layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
+
+ layer.w1 = ggml_new_tensor_2d(ctx, wtype, n_embd, n_ff);
+ layer.w2 = ggml_new_tensor_2d(ctx, wtype, n_ff, n_embd);
+ layer.w3 = ggml_new_tensor_2d(ctx, wtype, n_embd, n_ff);
+
+ // map by name
+ model.tensors["layers." + std::to_string(i) + ".attention_norm.weight"] = layer.attention_norm;
+
+ model.tensors["layers." + std::to_string(i) + ".attention.wq.weight"] = layer.wq;
+ model.tensors["layers." + std::to_string(i) + ".attention.wk.weight"] = layer.wk;
+ model.tensors["layers." + std::to_string(i) + ".attention.wv.weight"] = layer.wv;
+ model.tensors["layers." + std::to_string(i) + ".attention.wo.weight"] = layer.wo;
+
+ model.tensors["layers." + std::to_string(i) + ".ffn_norm.weight"] = layer.ffn_norm;
+
+ model.tensors["layers." + std::to_string(i) + ".feed_forward.w1.weight"] = layer.w1;
+ model.tensors["layers." + std::to_string(i) + ".feed_forward.w2.weight"] = layer.w2;
+ model.tensors["layers." + std::to_string(i) + ".feed_forward.w3.weight"] = layer.w3;
+ }
+ }
+
+ const size_t file_offset = fin.tellg();
+
+ fin.close();
+
+ std::vector<uint8_t> tmp;
+
+ if (progress_callback) {
+ progress_callback(0.0, progress_callback_user_data);
+ }
+
+ for (int i = 0; i < n_parts; ++i) {
+ const int part_id = i;
+ //const int part_id = n_parts - i - 1;
+
+ std::string fname_part = fname;
+ if (i > 0) {
+ fname_part += "." + std::to_string(i);
+ }
+
+ fprintf(stderr, "%s: loading model part %d/%d from '%s'\n", __func__, i+1, n_parts, fname_part.c_str());
+
+ fin = std::ifstream(fname_part, std::ios::binary);
+ fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
+
+ fin.seekg(0, fin.end);
+ const size_t file_size = fin.tellg();
+
+ fin.seekg(file_offset);
+
+ // load weights
+ {
+ size_t total_size = 0;
+
+ model.n_loaded = 0;
+
+ fprintf(stderr, "%s: ", __func__);
+
+ while (true) {
+ int32_t n_dims;
+ int32_t length;
+ int32_t ftype;
+
+ fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
+ fin.read(reinterpret_cast<char *>(&length), sizeof(length));
+ fin.read(reinterpret_cast<char *>(&ftype), sizeof(ftype));
+
+ if (fin.eof()) {
+ break;
+ }
+
+ int32_t nelements = 1;
+ int32_t ne[2] = { 1, 1 };
+ for (int i = 0; i < n_dims; ++i) {
+ fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
+ nelements *= ne[i];
+ }
+
+ std::string name(length, 0);
+ fin.read(&name[0], length);
+
+ if (model.tensors.find(name.data()) == model.tensors.end()) {
+ fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
+ return false;
+ }
+
+ // split_type = 0: split by columns
+ // split_type = 1: split by rows
+ int split_type = 0;
+
+ // split_type = 0:
+ // regex:
+ // - tok_embeddings.*
+ // - layers.*.attention.wo.weight
+ // - layers.*.feed_forward.w2.weight
+
+ // split_type = 1:
+ // regex:
+ // - output.*
+ // - layers.*.attention.wq.weight
+ // - layers.*.attention.wk.weight
+ // - layers.*.attention.wv.weight
+ // - layers.*.feed_forward.w1.weight
+ // - layers.*.feed_forward.w3.weight
+ if (name.find("tok_embeddings") != std::string::npos) {
+ split_type = 0;
+ } else if (name.find("layers") != std::string::npos) {
+ if (name.find("attention.wo.weight") != std::string::npos) {
+ split_type = 0;
+ } else if (name.find("feed_forward.w2.weight") != std::string::npos) {
+ split_type = 0;
+ } else {
+ split_type = 1;
+ }
+ } else if (name.find("output") != std::string::npos) {
+ split_type = 1;
+ }
+
+ auto tensor = model.tensors[name.data()];
+
+ if (n_dims == 1) {
+ if (ggml_nelements(tensor) != nelements) {
+ fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
+ return false;
+ }
+ } else {
+ if (ggml_nelements(tensor)/n_parts != nelements) {
+ fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
+ return false;
+ }
+ }
+
+ if (n_dims == 1) {
+ if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
+ fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
+ __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
+ return false;
+ }
+ } else {
+ if (split_type == 0) {
+ if (tensor->ne[0]/n_parts != ne[0] || tensor->ne[1] != ne[1]) {
+ fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
+ __func__, name.data(), tensor->ne[0]/n_parts, tensor->ne[1], ne[0], ne[1]);
+ return false;
+ }
+ } else {
+ if (tensor->ne[0] != ne[0] || tensor->ne[1]/n_parts != ne[1]) {
+ fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
+ __func__, name.data(), tensor->ne[0], tensor->ne[1]/n_parts, ne[0], ne[1]);
+ return false;
+ }
+ }
+ }
+
+ if (0) {
+ static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
+ fprintf(stderr, "%24s - [%5d, %5d], type = %6s, split = %d\n", name.data(), ne[0], ne[1], ftype_str[ftype], split_type);
+ }
+
+ size_t bpe = 0;
+
+ switch (ftype) {
+ case 0: bpe = ggml_type_size(GGML_TYPE_F32); break;
+ case 1: bpe = ggml_type_size(GGML_TYPE_F16); break;
+ case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
+ case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
+ default:
+ {
+ fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
+ return false;
+ }
+ }
+
+ if (n_dims == 1 || n_parts == 1) {
+ if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
+ fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
+ __func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
+ return false;
+ }
+
+ if (part_id == 0) {
+ fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
+ } else {
+ fin.seekg(ggml_nbytes(tensor), std::ios::cur);
+ }
+
+ total_size += ggml_nbytes(tensor);
+ } else {
+ if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)/n_parts) {
+ fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
+ __func__, name.data(), ggml_nbytes(tensor)/n_parts, nelements*bpe);
+ return false;
+ }
+
+ if (split_type == 0) {
+ const int np0 = ne[0];
+
+ const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
+ assert(row_size == tensor->nb[1]);
+
+ for (int i1 = 0; i1 < ne[1]; ++i1) {
+ const size_t offset_row = i1*row_size;
+ const size_t offset = offset_row + ((part_id*np0)/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
+ fin.read(reinterpret_cast<char *>(tensor->data) + offset, row_size/n_parts);
+ }
+ } else {
+ const int np1 = ne[1];
+
+ const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
+
+ for (int i1 = 0; i1 < ne[1]; ++i1) {
+ const size_t offset_row = (i1 + part_id*np1)*row_size;
+ fin.read(reinterpret_cast<char *>(tensor->data) + offset_row, row_size);
+ }
+ }
+
+ total_size += ggml_nbytes(tensor)/n_parts;
+ }
+
+ //fprintf(stderr, "%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
+ model.n_loaded++;
+
+ // progress
+ if (progress_callback) {
+ double current_file_progress = double(size_t(fin.tellg()) - file_offset) / double(file_size - file_offset);
+ double current_progress = (double(i) + current_file_progress) / double(n_parts);
+ progress_callback(current_progress, progress_callback_user_data);
+ }
+ if (model.n_loaded % 8 == 0) {
+ fprintf(stderr, ".");
+ fflush(stderr);
+ }
+ }
+
+ fprintf(stderr, " done\n");
+
+ fprintf(stderr, "%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, model.n_loaded);
+ if (model.n_loaded == 0) {
+ fprintf(stderr, "%s: WARN no tensors loaded from model file - assuming empty model for testing\n", __func__);
+ } else if (model.n_loaded != (int) model.tensors.size()) {
+ fprintf(stderr, "%s: ERROR not all tensors loaded from model file - expected %zu, got %d\n", __func__, model.tensors.size(), model.n_loaded);
+ return false;
+ }
+ }
+
+ fin.close();
+ }
+
+ lctx.t_load_us = ggml_time_us() - t_start_us;
+
+ if (progress_callback) {
+ progress_callback(1.0, progress_callback_user_data);
+ }
+
+ return true;
+}
+
+// evaluate the transformer
+//
+// - lctx: llama context
+// - tokens: new batch of tokens to process
+// - n_past: number of tokens already evaluated (the current position in the context)
+// - n_threads: number of threads to use
+//
+static bool llama_eval_internal(
+ llama_context & lctx,
+ const llama_token * tokens,
+ const int n_tokens,
+ const int n_past,
+ const int n_threads) {
+ const int64_t t_start_us = ggml_time_us();
+
+ const int N = n_tokens;
+
+ const auto & model = lctx.model;
+ const auto & hparams = model.hparams;
+
+ auto & kv_self = model.kv_self;
+
+ LLAMA_ASSERT(!!kv_self.ctx);
+
+ const int n_embd = hparams.n_embd;
+ const int n_layer = hparams.n_layer;
+ const int n_ctx = hparams.n_ctx;
+ const int n_head = hparams.n_head;
+ const int n_vocab = hparams.n_vocab;
+ const int n_rot = hparams.n_embd/hparams.n_head;
+
+ auto & mem_per_token = lctx.mem_per_token;
+ auto & buf_compute = lctx.buf_compute;
+
+ struct ggml_init_params params = {
+ /*.mem_size =*/ buf_compute.size(),
+ /*.mem_buffer =*/ buf_compute.data(),
+ };
+
+ struct ggml_context * ctx0 = ggml_init(params);
+
+ // for big prompts, if BLAS is enabled, it is better to use only one thread
+ // otherwise the threads spin-lock waiting for the BLAS calls and degrade performance
+ ggml_cgraph gf = {};
+ gf.n_threads = N > 255 && ggml_cpu_has_blas() ? 1 : n_threads;
+
+ struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+ memcpy(embd->data, tokens, N*ggml_element_size(embd));
+
+ struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embeddings, embd);
+
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * inpSA = inpL;
+
+ struct ggml_tensor * cur;
+
+ lctx.use_buf(ctx0, 0);
+
+ // norm
+ {
+ cur = ggml_rms_norm(ctx0, inpL);
+
+ // cur = attention_norm*cur
+ cur = ggml_mul(ctx0,
+ ggml_repeat(ctx0, model.layers[il].attention_norm, cur),
+ cur);
+ }
+
+ // self-attention
+ {
+ struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+ struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+ struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+
+ // store key and value to memory
+ if (N >= 1) {
+ struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd, (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
+ struct ggml_tensor * v = ggml_view_1d(ctx0, kv_self.v, N*n_embd, (ggml_element_size(kv_self.v)*n_embd)*(il*n_ctx + n_past));
+
+ ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
+ ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
+ }
+
+ // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
+ struct ggml_tensor * Q =
+ ggml_permute(ctx0,
+ ggml_rope(ctx0,
+ ggml_cpy(ctx0,
+ Qcur,
+ ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)),
+ n_past, n_rot, 0),
+ 0, 2, 1, 3);
+
+ // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
+ struct ggml_tensor * K =
+ ggml_permute(ctx0,
+ ggml_rope(ctx0,
+ ggml_reshape_3d(ctx0,
+ ggml_view_1d(ctx0, kv_self.k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(kv_self.k)*n_embd),
+ n_embd/n_head, n_head, n_past + N),
+ n_past, n_rot, 1),
+ 0, 2, 1, 3);
+
+ // K * Q
+ struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
+
+ // KQ_scaled = KQ / sqrt(n_embd/n_head)
+ struct ggml_tensor * KQ_scaled =
+ ggml_scale(ctx0,
+ KQ,
+ ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head)));
+
+ // KQ_masked = mask_past(KQ_scaled)
+ struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
+
+ // KQ = soft_max(KQ_masked)
+ struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
+
+ // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
+ struct ggml_tensor * V_trans =
+ ggml_cpy(ctx0,
+ ggml_permute(ctx0,
+ ggml_reshape_3d(ctx0,
+ ggml_view_1d(ctx0, kv_self.v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(kv_self.v)*n_embd),
+ n_embd/n_head, n_head, n_past + N),
+ 1, 2, 0, 3),
+ ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd/n_head, n_head));
+
+ // KQV = transpose(V) * KQ_soft_max
+ struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);
+
+ // KQV_merged = KQV.permute(0, 2, 1, 3)
+ struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
+
+ // cur = KQV_merged.contiguous().view(n_embd, N)
+ cur = ggml_cpy(ctx0,
+ KQV_merged,
+ ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
+
+ // projection (no bias)
+ cur = ggml_mul_mat(ctx0,
+ model.layers[il].wo,
+ cur);
+ }
+
+ lctx.use_buf(ctx0, 1);
+
+ struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
+
+ // feed-forward network
+ {
+ // norm
+ {
+ cur = ggml_rms_norm(ctx0, inpFF);
+
+ // cur = ffn_norm*cur
+ cur = ggml_mul(ctx0,
+ ggml_repeat(ctx0, model.layers[il].ffn_norm, cur),
+ cur);
+ }
+
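+ // SwiGLU: the three matmuls below compute w2*(silu(w1*x) * (w3*x)), with no bias terms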
+ struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
+ model.layers[il].w3,
+ cur);
+
+ cur = ggml_mul_mat(ctx0,
+ model.layers[il].w1,
+ cur);
+
+ // SILU activation
+ cur = ggml_silu(ctx0, cur);
+
+ cur = ggml_mul(ctx0, cur, tmp);
+
+ cur = ggml_mul_mat(ctx0,
+ model.layers[il].w2,
+ cur);
+ }
+
+ cur = ggml_add(ctx0, cur, inpFF);
+
+ // input for next layer
+ inpL = cur;
+ }
+
+ lctx.use_buf(ctx0, 0);
+
+ // used at the end to optionally extract the embeddings
+ struct ggml_tensor * embeddings = NULL;
+
+ // norm
+ {
+
+ inpL = ggml_rms_norm(ctx0, inpL);
+
+ // inpL = norm*inpL
+ inpL = ggml_mul(ctx0,
+ ggml_repeat(ctx0, model.norm, inpL),
+ inpL);
+
+ embeddings = inpL;
+ }
+
+ // lm_head
+ inpL = ggml_mul_mat(ctx0, model.output, inpL);
+
+ lctx.use_buf(ctx0, -1);
+
+ // logits -> probs
+ //inpL = ggml_soft_max(ctx0, inpL);
+
+ // run the computation
+ ggml_build_forward_expand(&gf, inpL);
+ ggml_graph_compute (ctx0, &gf);
+
+ //if (n_past%100 == 0) {
+ // ggml_graph_print (&gf);
+ // ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
+ //}
+
+ //embd_w.resize(n_vocab*N);
+ //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
+
+ // extract logits
+ {
+ auto & logits_out = lctx.logits;
+
+ if (lctx.logits_all) {
+ logits_out.resize(n_vocab * N);
+ memcpy(logits_out.data(), (float *) ggml_get_data(inpL), sizeof(float)*n_vocab*N);
+ } else {
+ // return result for just the last token
+ logits_out.resize(n_vocab);
+ memcpy(logits_out.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+ }
+ }
+
+ // extract embeddings
+ if (lctx.embedding.size()) {
+ auto & embedding_out = lctx.embedding;
+
+ embedding_out.resize(n_embd);
+ memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
+ }
+
+ if (mem_per_token == 0) {
+ mem_per_token = ggml_used_mem(ctx0)/N;
+ }
+
+#if 0
+ printf("\n%s: used_mem = %.3f MB, scratch -- %.3f MB %.3f MB\n", __func__,
+ ggml_used_mem(ctx0)/1024.0/1024.0,
+ lctx.get_buf_max_mem(0)/1024.0/1024.0,
+ lctx.get_buf_max_mem(1)/1024.0/1024.0);
+#endif
+
+ ggml_free(ctx0);
+
+ // measure the performance only for the single-token evals
+ if (N == 1) {
+ lctx.t_eval_us += ggml_time_us() - t_start_us;
+ lctx.n_eval++;
+ }
+ else if (N > 1) {
+ lctx.t_p_eval_us += ggml_time_us() - t_start_us;
+ lctx.n_p_eval += N;
+ }
+
+ return true;
+}
+
+//
+// tokenizer
+//
+
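+// length in bytes of the utf-8 sequence that starts with byte src, decoded from the top 4 bits:
+// 0xxx -> 1, 110x -> 2, 1110 -> 3, 1111 -> 4
+// (continuation bytes 10xx also map to 1 so that malformed input still advances)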
+static size_t utf8_len(char src) {
+ const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
+ uint8_t highbits = static_cast<uint8_t>(src) >> 4;
+ return lookup[highbits];
+}
+
+struct llama_sp_symbol {
+ using index = int;
+ index prev;
+ index next;
+ const char * text;
+ size_t n;
+};
+
+struct llama_sp_bigram {
+ struct comparator {
+ // order the priority queue so that the highest-scoring bigram is on top
+ // (ties are broken in favor of the lowest left symbol index)
+ bool operator()(const llama_sp_bigram & l, const llama_sp_bigram & r) const {
+ return (l.score < r.score) || (l.score == r.score && l.left > r.left);
+ }
+ };
+ using queue_storage = std::vector<llama_sp_bigram>;
+ using queue = std::priority_queue<llama_sp_bigram, queue_storage, comparator>;
+ llama_sp_symbol::index left;
+ llama_sp_symbol::index right;
+ float score;
+ size_t size;
+};
+
+// original implementation:
+// https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
+struct llama_tokenizer {
+ llama_tokenizer(const llama_vocab & vocab): vocab_(vocab) {}
+
+ void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
+ // split string into utf8 chars
+ int index = 0;
+ size_t offs = 0;
+ while (offs < text.size()) {
+ llama_sp_symbol sym;
+ size_t char_len = std::min(text.size() - offs, utf8_len(text[offs]));
+ sym.text = text.c_str() + offs;
+ sym.n = char_len;
+ offs += char_len;
+ sym.prev = index - 1;
+ sym.next = offs == text.size() ? -1 : index + 1;
+ index++;
+ symbols_.emplace_back(std::move(sym));
+ }
+
+ // seed the work queue with all possible 2-character tokens.
+ for (size_t i = 1; i < symbols_.size(); ++i) {
+ try_add_bigram(i - 1, i);
+ }
+
+ // keep substituting the highest-scoring pairs for as long as we can.
+ while (!work_queue_.empty()) {
+ auto bigram = work_queue_.top();
+ work_queue_.pop();
+
+ auto & left_sym = symbols_[bigram.left];
+ auto & right_sym = symbols_[bigram.right];
+
+ // if one of the symbols already got merged, skip it.
+ if (left_sym.n == 0 || right_sym.n == 0 ||
+ left_sym.n + right_sym.n != bigram.size) {
+ continue;
+ }
+
+ // merge the right sym into the left one
+ left_sym.n += right_sym.n;
+ right_sym.n = 0;
+
+ //printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
+
+ // remove the right sym from the chain
+ left_sym.next = right_sym.next;
+ if (right_sym.next >= 0) {
+ symbols_[right_sym.next].prev = bigram.left;
+ }
+
+ // find more substitutions
+ try_add_bigram(left_sym.prev, bigram.left);
+ try_add_bigram(bigram.left, left_sym.next);
+ }
+
+ for (int i = 0; i != -1; i = symbols_[i].next) {
+ auto & symbol = symbols_[i];
+ auto token = vocab_.token_to_id.find(std::string(symbol.text, symbol.n));
+
+ if (token == vocab_.token_to_id.end()) {
+ // output any symbols that did not form tokens as bytes.
+ for (int j = 0; j < (int) symbol.n; ++j) {
+ llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3; // byte tokens start at id 3 in the vocab
+ output.push_back(token_id);
+ }
+ } else {
+ output.push_back((*token).second);
+ }
+ }
+ }
+
+private:
+ void try_add_bigram(int left, int right) {
+ if (left == -1 || right == -1) {
+ return;
+ }
+
+ const std::string text = std::string(symbols_[left].text, symbols_[left].n + symbols_[right].n);
+ auto token = vocab_.token_to_id.find(text);
+
+ if (token == vocab_.token_to_id.end()) {
+ return;
+ }
+
+ if (static_cast<size_t>((*token).second) >= vocab_.id_to_token.size()) {
+ return;
+ }
+
+ const auto &tok_score = vocab_.id_to_token[(*token).second];
+
+ llama_sp_bigram bigram;
+ bigram.left = left;
+ bigram.right = right;
+ bigram.score = tok_score.score;
+ bigram.size = text.size();
+ work_queue_.push(bigram);
+ }
+
+ const llama_vocab & vocab_;
+ std::vector<llama_sp_symbol> symbols_;
+ llama_sp_bigram::queue work_queue_;
+};
+
+static std::vector<llama_vocab::id> llama_tokenize(const llama_vocab & vocab, const std::string & text, bool bos) {
+ llama_tokenizer tokenizer(vocab);
+ std::vector<llama_vocab::id> output;
+
+ if (text.size() == 0) {
+ return output;
+ }
+
+ if (bos) {
+ output.push_back(1);
+ }
+
+ tokenizer.tokenize(text, output);
+ return output;
+}
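+
+// usage sketch (illustrative only):
+//
+//   std::vector<llama_vocab::id> ids = llama_tokenize(vocab, "Hello world", /*bos =*/ true);
+//   // ids[0] == 1 (BOS), followed by the sentencepiece merges for "Hello world"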
+
+//
+// sampling
+//
+
+static void sample_top_k(std::vector<std::pair<double, llama_vocab::id>> & logits_id, int top_k) {
+ // find the top k tokens
+ std::partial_sort(
+ logits_id.begin(),
+ logits_id.begin() + top_k, logits_id.end(),
+ [](const std::pair<double, llama_vocab::id> & a, const std::pair<double, llama_vocab::id> & b) {
+ return a.first > b.first;
+ });
+
+ logits_id.resize(top_k);
+}
+
+static llama_vocab::id llama_sample_top_p_top_k(
+ llama_context & lctx,
+ const std::vector<llama_vocab::id> & last_n_tokens,
+ int top_k,
+ double top_p,
+ double temp,
+ double repeat_penalty) {
+ auto & rng = lctx.rng;
+
+ const int n_logits = lctx.model.hparams.n_vocab;
+
+ const auto & logits = lctx.logits;
+ const auto * plogits = logits.data() + logits.size() - n_logits;
+
+ std::vector<std::pair<double, llama_vocab::id>> logits_id;
+ logits_id.reserve(n_logits);
+
+ {
+ const double scale = 1.0/temp;
+ for (int i = 0; i < n_logits; ++i) {
+ // repetition penalty from ctrl paper (https://arxiv.org/abs/1909.05858)
+ // credit https://github.com/facebookresearch/llama/compare/main...shawwn:llama:main
+ if (std::find(last_n_tokens.begin(), last_n_tokens.end(), i) != last_n_tokens.end()) {
+ // if score < 0 then the repetition penalty has to be multiplied to reduce the previous token probability
+ if (plogits[i] < 0.0) {
+ logits_id.push_back(std::make_pair(plogits[i]*scale*repeat_penalty, i));
+ } else {
+ logits_id.push_back(std::make_pair(plogits[i]*scale/repeat_penalty, i));
+ }
+ } else {
+ logits_id.push_back(std::make_pair(plogits[i]*scale, i));
+ }
+ }
+ }
+
+ sample_top_k(logits_id, top_k);
+
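+ // find the maximum logit so that the exp() below cannot overflow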
+ double maxl = -std::numeric_limits<double>::infinity();
+ for (const auto & kv : logits_id) {
+ maxl = std::max(maxl, kv.first);
+ }
+
+ // compute probs for the top k tokens
+ std::vector<double> probs;
+ probs.reserve(logits_id.size());
+
+ double sum = 0.0;
+ for (const auto & kv : logits_id) {
+ double p = exp(kv.first - maxl);
+ probs.push_back(p);
+ sum += p;
+ }
+
+ // normalize the probs
+ for (auto & p : probs) {
+ p /= sum;
+ }
+
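+ // keep only the smallest prefix of tokens whose cumulative probability reaches top_p,
+ // e.g. probs = {0.5, 0.3, 0.15, 0.05} with top_p = 0.9 keeps the first three (cumsum 0.95)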
+ if (top_p < 1.0f) {
+ double cumsum = 0.0f;
+ for (int i = 0; i < (int) probs.size(); i++) {
+ cumsum += probs[i];
+ if (cumsum >= top_p) {
+ probs.resize(i + 1);
+ logits_id.resize(i + 1);
+ break;
+ }
+ }
+
+ cumsum = 1.0/cumsum;
+ for (int i = 0; i < (int) probs.size(); i++) {
+ probs[i] *= cumsum;
+ }
+ }
+
+ //printf("\n");
+ //for (int i = 0; i < (int) 10; i++) {
+ // printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]);
+ //}
+ //printf("\n\n");
+ //exit(0);
+
+ std::discrete_distribution<> dist(probs.begin(), probs.end());
+ int idx = dist(rng);
+
+ return logits_id[idx].second;
+}
+
+//
+// quantization
+//
+
+// TODO: reuse code from the llama_model_load() somehow
+bool llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, int itype, int qk) {
+ ggml_type type = GGML_TYPE_Q4_1;
+
+ switch (itype) {
+ case 2: type = GGML_TYPE_Q4_0; break;
+ case 3: type = GGML_TYPE_Q4_1; break;
+ default: fprintf(stderr, "%s: invalid quantization type %d\n", __func__, itype); return false;
+ }
+
+ if (type != GGML_TYPE_Q4_0 && type != GGML_TYPE_Q4_1) {
+ fprintf(stderr, "%s: invalid quantization type %d\n", __func__, type);
+ return false;
+ }
+
+ llama_vocab vocab;
+
+ printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
+
+ auto finp = std::ifstream(fname_inp, std::ios::binary);
+ if (!finp) {
+ fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__, fname_inp.c_str());
+ return false;
+ }
+
+ auto fout = std::ofstream(fname_out, std::ios::binary);
+ if (!fout) {
+ fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname_out.c_str());
+ return false;
+ }
+
+ // verify magic
+ {
+ uint32_t magic;
+ finp.read((char *) &magic, sizeof(magic));
+ if (magic == LLAMA_FILE_MAGIC_UNVERSIONED) {
+ fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
+ __func__, fname_inp.c_str());
+ return false;
+ }
+ if (magic != LLAMA_FILE_MAGIC) {
+ fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname_inp.c_str());
+ return false;
+ }
+
+ fout.write((char *) &magic, sizeof(magic));
+
+ uint32_t format_version;
+ finp.read((char *) &format_version, sizeof(format_version));
+
+ if (format_version != LLAMA_FILE_VERSION) {
+ fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ", expected %d)\n",
+ __func__, fname_inp.c_str(), format_version, LLAMA_FILE_VERSION);
+ return false;
+ }
+
+ fout.write((char *) &format_version, sizeof(format_version));
+ }
+
+ llama_hparams hparams;
+
+ // load hparams
+ {
+ finp.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
+ //finp.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
+ finp.read((char *) &hparams.n_embd, sizeof(hparams.n_embd));
+ finp.read((char *) &hparams.n_mult, sizeof(hparams.n_mult));
+ finp.read((char *) &hparams.n_head, sizeof(hparams.n_head));
+ finp.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
+ finp.read((char *) &hparams.n_rot, sizeof(hparams.n_rot));
+ finp.read((char *) &hparams.f16, sizeof(hparams.f16));
+
+ printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
+ printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
+ printf("%s: n_embd = %d\n", __func__, hparams.n_embd);
+ printf("%s: n_mult = %d\n", __func__, hparams.n_mult);
+ printf("%s: n_head = %d\n", __func__, hparams.n_head);
+ printf("%s: n_layer = %d\n", __func__, hparams.n_layer);
+ printf("%s: f16 = %d\n", __func__, hparams.f16);
+
+ fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
+ //fout.write((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
+ fout.write((char *) &hparams.n_embd, sizeof(hparams.n_embd));
+ fout.write((char *) &hparams.n_mult, sizeof(hparams.n_mult));
+ fout.write((char *) &hparams.n_head, sizeof(hparams.n_head));
+ fout.write((char *) &hparams.n_layer, sizeof(hparams.n_layer));
+ fout.write((char *) &hparams.n_rot, sizeof(hparams.n_rot));
+ fout.write((char *) &itype, sizeof(hparams.f16));
+ }
+
+ // load vocab
+ {
+ const int32_t n_vocab = hparams.n_vocab;
+
+ if (n_vocab != hparams.n_vocab) {
+ fprintf(stderr, "%s: invalid model file '%s' (bad vocab size %d != %d)\n",
+ __func__, fname_inp.c_str(), n_vocab, hparams.n_vocab);
+ return false;
+ }
+
+ std::string word;
+ vocab.id_to_token.resize(n_vocab);
+ for (int i = 0; i < n_vocab; i++) {
+ uint32_t len;
+ finp.read ((char *) &len, sizeof(len));
+ fout.write((char *) &len, sizeof(len));
+
+ word.resize(len);
+ finp.read ((char *) word.data(), len);
+ fout.write((char *) word.data(), len);
+
+ float score;
+ finp.read ((char *) &score, sizeof(score));
+ fout.write((char *) &score, sizeof(score));
+
+ vocab.token_to_id[word] = i;
+
+ auto &tok_score = vocab.id_to_token[i];
+ tok_score.tok = word;
+ tok_score.score = score;
+ }
+ }
+
+ // load weights
+ {
+ size_t total_size_org = 0;
+ size_t total_size_new = 0;
+
+ std::vector<float> work;
+
+ std::vector<uint8_t> data_u8;
+ std::vector<ggml_fp16_t> data_f16;
+ std::vector<float> data_f32;
+
+ std::vector<int64_t> hist_all(1 << 4, 0);
+
+ while (true) {
+ int32_t n_dims;
+ int32_t length;
+ int32_t ftype;
+
+ finp.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
+ finp.read(reinterpret_cast<char *>(&length), sizeof(length));
+ finp.read(reinterpret_cast<char *>(&ftype), sizeof(ftype));
+
+ if (finp.eof()) {
+ break;
+ }
+
+ int32_t nelements = 1;
+ int32_t ne[2] = { 1, 1 };
+ for (int i = 0; i < n_dims; ++i) {
+ finp.read (reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
+ nelements *= ne[i];
+ }
+
+ std::string name(length, 0);
+ finp.read (&name[0], length);
+
+ {
+ static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
+ printf("%48s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ftype_str[ftype]);
+ }
+
+ // regexes of tensor names to be quantized
+ const std::vector<std::string> k_names = {
+ ".*weight",
+ };
+
+ bool quantize = false;
+ for (const auto & s : k_names) {
+ if (std::regex_match(name, std::regex(s))) {
+ quantize = true;
+ break;
+ }
+ }
+
+ // quantize only 2D tensors
+ quantize &= (n_dims == 2);
+
+ if (quantize) {
+ if (ftype != 0 && ftype != 1) {
+ fprintf(stderr, "%s: unsupported ftype %d for integer quantization\n", __func__, ftype);
+ return false;
+ }
+
+ if (ftype == 1) {
+ data_f16.resize(nelements);
+ finp.read(reinterpret_cast<char *>(data_f16.data()), nelements * sizeof(ggml_fp16_t));
+ data_f32.resize(nelements);
+ for (int i = 0; i < nelements; ++i) {
+ data_f32[i] = ggml_fp16_to_fp32(data_f16[i]);
+ }
+ } else {
+ data_f32.resize(nelements);
+ finp.read(reinterpret_cast<char *>(data_f32.data()), nelements * sizeof(float));
+ }
+
+ ftype = itype;
+ } else {
+ const int bpe = (ftype == 0) ? sizeof(float) : sizeof(uint16_t);
+
+ data_u8.resize(nelements*bpe);
+ finp.read(reinterpret_cast<char *>(data_u8.data()), nelements * bpe);
+ }
+
+ fout.write(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
+ fout.write(reinterpret_cast<char *>(&length), sizeof(length));
+ fout.write(reinterpret_cast<char *>(&ftype), sizeof(ftype));
+ for (int i = 0; i < n_dims; ++i) {
+ fout.write(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
+ }
+ fout.write(&name[0], length);
+
+ if (quantize) {
+ printf("quantizing .. ");
+ work.resize(nelements); // for quantization
+
+ size_t cur_size = 0;
+ std::vector<int64_t> hist_cur(1 << 4, 0);
+
+ switch (type) {
+ case GGML_TYPE_Q4_0:
+ {
+ cur_size = ggml_quantize_q4_0(data_f32.data(), work.data(), nelements, ne[0], qk, hist_cur.data());
+ } break;
+ case GGML_TYPE_Q4_1:
+ {
+ cur_size = ggml_quantize_q4_1(data_f32.data(), work.data(), nelements, ne[0], qk, hist_cur.data());
+ } break;
+ default:
+ {
+ fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, type);
+ return false;
+ }
+ }
+
+ fout.write(reinterpret_cast<char *>(work.data()), cur_size);
+ total_size_new += cur_size;
+
+ printf("size = %8.2f MB -> %8.2f MB | hist: ", nelements * sizeof(float)/1024.0/1024.0, cur_size/1024.0/1024.0);
+ for (int i = 0; i < (int) hist_cur.size(); ++i) {
+ hist_all[i] += hist_cur[i];
+ }
+
+ for (int i = 0; i < (int) hist_cur.size(); ++i) {
+ printf("%5.3f ", hist_cur[i] / (float)nelements);
+ }
+ printf("\n");
+ } else {
+ printf("size = %8.3f MB\n", data_u8.size()/1024.0/1024.0);
+ fout.write(reinterpret_cast<char *>(data_u8.data()), data_u8.size());
+ total_size_new += data_u8.size();
+ }
+
+ total_size_org += nelements * sizeof(float);
+ }
+
+ printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
+ printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
+
+ {
+ int64_t sum_all = 0;
+ for (int i = 0; i < (int) hist_all.size(); ++i) {
+ sum_all += hist_all[i];
+ }
+
+ printf("%s: hist: ", __func__);
+ for (int i = 0; i < (int) hist_all.size(); ++i) {
+ printf("%5.3f ", hist_all[i] / (float)sum_all);
+ }
+ printf("\n");
+ }
+ }
+
+ finp.close();
+ fout.close();
+
+ return true;
+}
+
+//
+// interface implementation
+//
+
+struct llama_context * llama_init_from_file(
+ const char * path_model,
+ struct llama_context_params params) {
+ ggml_time_init();
+
+ llama_context * ctx = new llama_context;
+
+ if (params.seed <= 0) {
+ params.seed = time(NULL);
+ }
+
+ ctx->rng = std::mt19937(params.seed);
+ ctx->logits_all = params.logits_all;
+
+ ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
+
+ if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, memory_type,
+ params.vocab_only, params.progress_callback,
+ params.progress_callback_user_data)) {
+ fprintf(stderr, "%s: failed to load model\n", __func__);
+ llama_free(ctx);
+ return nullptr;
+ }
+
+ if (params.use_mlock) {
+ char *err;
+ if (!ggml_mlock(ctx->model.ctx, &err)) {
+ fprintf(stderr, "%s\n", err);
+ free(err);
+ llama_free(ctx);
+ return nullptr;
+ }
+ }
+
+ // reserve memory for context buffers
+ {
+ if (!kv_cache_init(ctx->model.hparams, ctx->model.kv_self, memory_type, ctx->model.hparams.n_ctx)) {
+ fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
+ llama_free(ctx);
+ return nullptr;
+ }
+
+ {
+ const size_t memory_size = ggml_nbytes(ctx->model.kv_self.k) + ggml_nbytes(ctx->model.kv_self.v);
+ fprintf(stderr, "%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
+ }
+
+ const auto & hparams = ctx->model.hparams;
+
+ // resized during inference
+ if (params.logits_all) {
+ ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
+ } else {
+ ctx->logits.reserve(hparams.n_vocab); // logits for a single token
+ }
+
+ if (params.embedding){
+ ctx->embedding.resize(hparams.n_embd);
+ }
+
+ ctx->buf_compute.resize(MEM_REQ_EVAL.at(ctx->model.type));
+
+ ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0.at(ctx->model.type));
+ ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1.at(ctx->model.type));
+ }
+
+ return ctx;
+}
+
+void llama_free(struct llama_context * ctx) {
+ kv_cache_free(ctx->model.kv_self);
+
+ if (ctx->model.ctx) {
+ ggml_free(ctx->model.ctx);
+ }
+
+ delete ctx;
+}
+
+int llama_model_quantize(
+ const char * fname_inp,
+ const char * fname_out,
+ int itype,
+ int qk) {
+ if (!llama_model_quantize_internal(fname_inp, fname_out, itype, qk)) {
+ fprintf(stderr, "%s: failed to quantize\n", __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
+int llama_eval(
+ struct llama_context * ctx,
+ const llama_token * tokens,
+ int n_tokens,
+ int n_past,
+ int n_threads) {
+ if (!llama_eval_internal(*ctx, tokens, n_tokens, n_past, n_threads)) {
+ fprintf(stderr, "%s: failed to eval\n", __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
+int llama_tokenize(
+ struct llama_context * ctx,
+ const char * text,
+ llama_token * tokens,
+ int n_max_tokens,
+ bool add_bos) {
+ auto res = llama_tokenize(ctx->vocab, text, add_bos);
+
+ if (n_max_tokens < (int) res.size()) {
+ fprintf(stderr, "%s: too many tokens\n", __func__);
+ return -((int) res.size()); // the negative of the required token count
+ }
+
+ for (size_t i = 0; i < res.size(); i++) {
+ tokens[i] = res[i];
+ }
+
+ return res.size();
+}
+
+int llama_n_vocab(struct llama_context * ctx) {
+ return ctx->vocab.id_to_token.size();
+}
+
+int llama_n_ctx(struct llama_context * ctx) {
+ return ctx->model.hparams.n_ctx;
+}
+
+int llama_n_embd(struct llama_context * ctx) {
+ return ctx->model.hparams.n_embd;
+}
+
+float * llama_get_logits(struct llama_context * ctx) {
+ return ctx->logits.data();
+}
+
+float * llama_get_embeddings(struct llama_context * ctx) {
+ return ctx->embedding.data();
+}
+
+const char * llama_token_to_str(struct llama_context * ctx, llama_token token) {
+ if (token >= llama_n_vocab(ctx)) {
+ return nullptr;
+ }
+
+ return ctx->vocab.id_to_token[token].tok.c_str();
+}
+
+llama_token llama_token_bos() {
+ return 1;
+}
+
+llama_token llama_token_eos() {
+ return 2;
+}
+
+llama_token llama_sample_top_p_top_k(
+ llama_context * ctx,
+ const llama_token * last_n_tokens_data,
+ int last_n_tokens_size,
+ int top_k,
+ double top_p,
+ double temp,
+ double repeat_penalty) {
+ const int64_t t_start_sample_us = ggml_time_us();
+
+ llama_token result = 0;
+
+ // TODO: avoid this ...
+ const auto last_n_tokens = std::vector<llama_token>(last_n_tokens_data, last_n_tokens_data + last_n_tokens_size);
+
+ result = llama_sample_top_p_top_k(
+ *ctx,
+ last_n_tokens,
+ top_k,
+ top_p,
+ temp,
+ repeat_penalty);
+
+ ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+ ctx->n_sample++;
+
+ return result;
+}
+
+
+void llama_print_timings(struct llama_context * ctx) {
+ const int64_t t_end_us = ggml_time_us();
+
+ const int32_t n_sample = std::max(1, ctx->n_sample);
+ const int32_t n_eval = std::max(1, ctx->n_eval);
+ const int32_t n_p_eval = std::max(1, ctx->n_p_eval);
+
+ fprintf(stderr, "\n");
+ fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0f);
+ fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->t_sample_us, n_sample, 1e-3f * ctx->t_sample_us / n_sample);
+ fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token)\n", __func__, 1e-3f * ctx->t_p_eval_us, n_p_eval, 1e-3f * ctx->t_p_eval_us / n_p_eval);
+ fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per run)\n", __func__, 1e-3f * ctx->t_eval_us, n_eval, 1e-3f * ctx->t_eval_us / n_eval);
+ fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_end_us - ctx->t_start_us)/1000.0f);
+}
+
+void llama_reset_timings(struct llama_context * ctx) {
+ ctx->t_start_us = ggml_time_us();
+
+ ctx->t_sample_us = ctx->n_sample = 0;
+ ctx->t_eval_us = ctx->n_eval = 0;
+ ctx->t_p_eval_us = ctx->n_p_eval = 0;
+}
+
+const char * llama_print_system_info(void) {
+ static std::string s;
+
+ s = "";
+ s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | ";
+ s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | ";
+ s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | ";
+ s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | ";
+ s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | ";
+ s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | ";
+ s += "F16C = " + std::to_string(ggml_cpu_has_f16c()) + " | ";
+ s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | ";
+ s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | ";
+ s += "BLAS = " + std::to_string(ggml_cpu_has_blas()) + " | ";
+ s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | ";
+ s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
+
+ return s.c_str();
+}
--- a/ggml.c
+++ b/ggml.c
+// Defines CLOCK_MONOTONIC and asprintf on Linux
+#define _GNU_SOURCE
+
#include "ggml.h"
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
-#elif !defined(__FreeBSD__)
+#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#include <alloca.h>
#endif
#include <assert.h>
+#include <errno.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>
+#include <float.h>
// if C99 - static_assert is noop
// ref: https://stackoverflow.com/a/53923785/4039976
#else
// ref: https://github.com/ggerganov/whisper.cpp/issues/168
#include <windows.h>
-#include <errno.h>
#endif
typedef volatile LONG atomic_int;
#define static_assert(cond, msg) _Static_assert(cond, msg)
#endif
+#define GGML_MLOCK_SUPPORT 0
+
+#ifdef __has_include
+ #if __has_include(<sys/mman.h>)
+ #undef GGML_MLOCK_SUPPORT
+ #define GGML_MLOCK_SUPPORT 1
+ #include <sys/mman.h>
+ #endif
+#endif
+
+
/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
+#define GGML_SILU_FP16
#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL 2
#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
+#elif defined(__POWER9_VECTOR__)
+
+#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+/* the inline asm below is about 12% faster than the lookup method */
+#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
+#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+
+static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
+ register float f;
+ register double d;
+ __asm__(
+ "mtfprd %0,%2\n"
+ "xscvhpdp %0,%0\n"
+ "frsp %1,%0\n" :
+ /* temp */ "=d"(d),
+ /* out */ "=f"(f):
+ /* in */ "r"(h));
+ return f;
+}
+
+static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
+ register double d;
+ register ggml_fp16_t r;
+ __asm__( /* xscvdphp can work on double or single precision */
+ "xscvdphp %0,%2\n"
+ "mffprd %1,%0\n" :
+ /* temp */ "=d"(d),
+ /* out */ "=r"(r):
+ /* in */ "f"(f));
+ return r;
+}
+
#else
// FP16 <-> FP32
// precomputed gelu table for f16 (128 KB)
static ggml_fp16_t table_gelu_f16[1 << 16];
+// precomputed silu table for f16 (128 KB)
+static ggml_fp16_t table_silu_f16[1 << 16];
+
// precomputed exp table for f16 (128 KB)
static ggml_fp16_t table_exp_f16[1 << 16];
// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
+// This is also true for POWER9.
#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)
inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
+//
+// quantization
+//
+
+#define QK 32
+
+// AVX routines provided by GH user Const-me
+// ref: https://github.com/ggerganov/ggml/pull/27#issuecomment-1464934600
+#if __AVX2__ || __AVX512F__
+// Unpack 32 4-bit fields into 32 bytes
+// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
+static inline __m256i bytesFromNibbles( const uint8_t* rsi )
+{
+ // Load 16 bytes from memory
+ __m128i tmp = _mm_loadu_si128( ( const __m128i* )rsi );
+
+ // Expand bytes into uint16_t values
+ __m256i bytes = _mm256_cvtepu8_epi16( tmp );
+
+ // Unpack values into individual bytes
+ const __m256i lowMask = _mm256_set1_epi8( 0xF );
+ __m256i high = _mm256_andnot_si256( lowMask, bytes );
+ __m256i low = _mm256_and_si256( lowMask, bytes );
+ high = _mm256_slli_epi16( high, 4 );
+ bytes = _mm256_or_si256( low, high );
+ return bytes;
+}
+
+static inline __m128i packNibbles( __m256i bytes )
+{
+ // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
+ const __m256i lowByte = _mm256_set1_epi16( 0xFF );
+ __m256i high = _mm256_andnot_si256( lowByte, bytes );
+ __m256i low = _mm256_and_si256( lowByte, bytes );
+ high = _mm256_srli_epi16( high, 4 );
+ bytes = _mm256_or_si256( low, high );
+
+ // Compress uint16_t lanes into bytes
+ __m128i r0 = _mm256_castsi256_si128( bytes );
+ __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
+ return _mm_packus_epi16( r0, r1 );
+}
+#endif
+
+// method 5
+// blocks of QK elements
+// represented with a single float (delta) and QK/2 bytes holding QK 4-bit quantized values (stored with a +8 offset)
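+//
+// block layout (bs = sizeof(float) + QK/2 = 20 bytes for a block of QK = 32 floats):
+//
+//   | d: float, 4 bytes | QK/2 = 16 bytes: QK 4-bit values, two per byte |
+//
+// with d = amax/7 and each nibble storing round(x/d) + 8, an unsigned value in [0 .. 15]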
+
+// reference implementation for deterministic creation of model files
+static void quantize_row_q4_0_reference(const float * restrict x, void * restrict y, int k) {
+ assert(k % QK == 0);
+ const int nb = k / QK;
+
+ const size_t bs = sizeof(float) + QK/2;
+
+ uint8_t * restrict pd = ((uint8_t *)y + 0*bs);
+ uint8_t * restrict pb = ((uint8_t *)y + 0*bs + sizeof(float));
+
+ uint8_t pp[QK/2];
+
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+
+ for (int l = 0; l < QK; l++) {
+ const float v = x[i*QK + l];
+ amax = MAX(amax, fabsf(v));
+ }
+
+ const float d = amax / ((1 << 3) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ *(float *)pd = d;
+ pd += bs;
+
+ for (int l = 0; l < QK; l += 2) {
+ const float v0 = x[i*QK + l + 0]*id;
+ const float v1 = x[i*QK + l + 1]*id;
+
+ const uint8_t vi0 = ((int8_t) (round(v0))) + 8;
+ const uint8_t vi1 = ((int8_t) (round(v1))) + 8;
+
+ assert(vi0 >= 0 && vi0 < 16);
+ assert(vi1 >= 0 && vi1 < 16);
+
+ pp[l/2] = vi0 | (vi1 << 4);
+ }
+
+ memcpy(pb, pp, sizeof(pp));
+ pb += bs;
+ }
+}
+
+void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
+ assert(k % QK == 0);
+
+#if defined(__ARM_NEON) || defined(__AVX2__) || defined(__wasm_simd128__) || defined(__POWER9_VECTOR__)
+ const int nb = k / QK;
+ const size_t bs = sizeof(float) + QK/2;
+
+ uint8_t * restrict pd = ((uint8_t *)y + 0*bs);
+ uint8_t * restrict pb = ((uint8_t *)y + 0*bs + sizeof(float));
+
+ uint8_t pp[QK/2];
+#endif
+
+#if defined(__POWER9_VECTOR__)
+ const vector float v85 = vec_splats(8.5f);
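+ // the 8.5f offset below rounds to nearest (ties upward) under truncation and
+ // folds in the +8 bias that maps the signed range [-8, 7] onto the stored [0, 15]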
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+
+ vector float srcv [8];
+ vector float asrcv[8];
+ vector float amaxv[8];
+
+ for (int l = 0; l < 8; l++) srcv[l] = *(vector float *)(x + i*32 + 4*l);
+ for (int l = 0; l < 8; l++) asrcv[l] = vec_abs(srcv[l]);
+
+ for (int l = 0; l < 4; l++) amaxv[2*l] = vec_max(asrcv[2*l], asrcv[2*l+1]);
+ //for (int l = 0; l < 2; l++) amaxv[4*l] = vec_max(amaxv[4*l], amaxv[4*l+2]);
+ amaxv[0] = vec_max(amaxv[0], amaxv[2]);
+ amaxv[4] = vec_max(amaxv[4], amaxv[6]);
+ //for (int l = 0; l < 1; l++) amaxv[8*l] = vec_max(amaxv[8*l], amaxv[8*l+4]);
+ amaxv[0] = vec_max(amaxv[0], amaxv[4]);
+
+ amax = MAX(
+ MAX(vec_extract(amaxv[0], 0), vec_extract(amaxv[0], 1)),
+ MAX(vec_extract(amaxv[0], 2), vec_extract(amaxv[0], 3)));
+
+ const float d = amax / ((1 << 3) - 1);
+ const float id = d ? 1.0/d : 0.0;
+
+ *(float *)pd = d;
+ pd += bs;
+
+ const vector float vid = vec_splats(id);
+ for (int l = 0; l < 8; l++) {
+ const vector float vf = vec_madd(srcv[l], vid, v85);
+ const vector signed int vi = vec_signed(vf);
+
+ pb[2*l + 0] = vec_extract(vi, 0) | (vec_extract(vi, 1) << 4);
+ pb[2*l + 1] = vec_extract(vi, 2) | (vec_extract(vi, 3) << 4);
+ }
+
+ //memcpy(pb, pp, sizeof(pp));
+ pb += bs;
+ }
+#elif __ARM_NEON
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+
+ float32x4_t srcv [8];
+ float32x4_t asrcv[8];
+ float32x4_t amaxv[8];
+
+ for (int l = 0; l < 8; l++) srcv[l] = vld1q_f32(x + i*32 + 4*l);
+ for (int l = 0; l < 8; l++) asrcv[l] = vabsq_f32(srcv[l]);
+
+ for (int l = 0; l < 4; l++) amaxv[2*l] = vmaxq_f32(asrcv[2*l], asrcv[2*l+1]);
+ for (int l = 0; l < 2; l++) amaxv[4*l] = vmaxq_f32(amaxv[4*l], amaxv[4*l+2]);
+ for (int l = 0; l < 1; l++) amaxv[8*l] = vmaxq_f32(amaxv[8*l], amaxv[8*l+4]);
+
+ amax = MAX(
+ MAX(vgetq_lane_f32(amaxv[0], 0), vgetq_lane_f32(amaxv[0], 1)),
+ MAX(vgetq_lane_f32(amaxv[0], 2), vgetq_lane_f32(amaxv[0], 3)));
+
+ const float d = amax / ((1 << 3) - 1);
+ const float id = d ? 1.0/d : 0.0;
+
+ *(float *)pd = d;
+ pd += bs;
+
+ for (int l = 0; l < 8; l++) {
+ const float32x4_t v = vmulq_n_f32(srcv[l], id);
+ const float32x4_t vf = vaddq_f32(v, vdupq_n_f32(8.5f));
+ const int32x4_t vi = vcvtq_s32_f32(vf);
+
+ pp[2*l + 0] = vgetq_lane_s32(vi, 0) | (vgetq_lane_s32(vi, 1) << 4);
+ pp[2*l + 1] = vgetq_lane_s32(vi, 2) | (vgetq_lane_s32(vi, 3) << 4);
+ }
+
+ memcpy(pb, pp, sizeof(pp));
+ pb += bs;
+ }
+#elif defined(__AVX2__)
+ for (int i = 0; i < nb; i++) {
+ // Load elements into 4 AVX vectors
+ __m256 v0 = _mm256_loadu_ps( x );
+ __m256 v1 = _mm256_loadu_ps( x + 8 );
+ __m256 v2 = _mm256_loadu_ps( x + 16 );
+ __m256 v3 = _mm256_loadu_ps( x + 24 );
+ x += 32;
+
+ // Compute max(abs(e)) for the block
+ const __m256 signBit = _mm256_set1_ps( -0.0f );
+ __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
+
+ __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
+ max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
+ max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
+ const float maxScalar = _mm_cvtss_f32( max4 );
+
+ // Quantize these floats
+ const float d = maxScalar / 7.0f;
+ *(float *)pd = d;
+ pd += bs;
+ const float id = ( maxScalar != 0.0f ) ? 7.0f / maxScalar : 0.0f;
+ const __m256 mul = _mm256_set1_ps( id );
+
+ // Apply the multiplier
+ v0 = _mm256_mul_ps( v0, mul );
+ v1 = _mm256_mul_ps( v1, mul );
+ v2 = _mm256_mul_ps( v2, mul );
+ v3 = _mm256_mul_ps( v3, mul );
+
+ // Round to nearest integer
+ v0 = _mm256_round_ps( v0, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC );
+ v1 = _mm256_round_ps( v1, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC );
+ v2 = _mm256_round_ps( v2, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC );
+ v3 = _mm256_round_ps( v3, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC );
+
+ // Convert floats to integers
+ __m256i i0 = _mm256_cvtps_epi32( v0 );
+ __m256i i1 = _mm256_cvtps_epi32( v1 );
+ __m256i i2 = _mm256_cvtps_epi32( v2 );
+ __m256i i3 = _mm256_cvtps_epi32( v3 );
+
+ // Convert int32 to int16
+ i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
+ i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
+ // Convert int16 to int8
+ i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+
+ // We got our precious signed bytes, but the order is now wrong
+ // These AVX2 pack instructions process 16-byte pieces independently
+ // The following instruction is fixing the order
+ const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
+ i0 = _mm256_permutevar8x32_epi32( i0, perm );
+
+ // Apply offset to translate the range from [ -7 .. +7 ] into [ +1 .. +15 ]
+ const __m256i off = _mm256_set1_epi8( 8 );
+ i0 = _mm256_add_epi8( i0, off );
+
+ // Compress the vector into 4 bit/value, and store
+ __m128i res = packNibbles( i0 );
+ _mm_storeu_si128( ( __m128i* )pb, res );
+ pb += bs;
+ }
+#elif defined(__wasm_simd128__)
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+
+ v128_t srcv [8];
+ v128_t asrcv[8];
+ v128_t amaxv[8];
+
+ for (int l = 0; l < 8; l++) srcv[l] = wasm_v128_load(x + i*32 + 4*l);
+ for (int l = 0; l < 8; l++) asrcv[l] = wasm_f32x4_abs(srcv[l]);
+
+ for (int l = 0; l < 4; l++) amaxv[2*l] = wasm_f32x4_max(asrcv[2*l], asrcv[2*l+1]);
+ for (int l = 0; l < 2; l++) amaxv[4*l] = wasm_f32x4_max(amaxv[4*l], amaxv[4*l+2]);
+ for (int l = 0; l < 1; l++) amaxv[8*l] = wasm_f32x4_max(amaxv[8*l], amaxv[8*l+4]);
+
+ amax = MAX(
+ MAX(wasm_f32x4_extract_lane(amaxv[0], 0), wasm_f32x4_extract_lane(amaxv[0], 1)),
+ MAX(wasm_f32x4_extract_lane(amaxv[0], 2), wasm_f32x4_extract_lane(amaxv[0], 3)));
+
+ const float d = amax / ((1 << 3) - 1);
+ const float id = d ? 1.0/d : 0.0;
+
+ *(float *)pd = d;
+ pd += bs;
+
+ for (int l = 0; l < 8; l++) {
+ const v128_t v = wasm_f32x4_mul(srcv[l], wasm_f32x4_splat(id));
+ const v128_t vf = wasm_f32x4_add(v, wasm_f32x4_splat(8.5f));
+ const v128_t vi = wasm_i32x4_trunc_sat_f32x4(vf);
+
+ pp[2*l + 0] = wasm_i32x4_extract_lane(vi, 0) | (wasm_i32x4_extract_lane(vi, 1) << 4);
+ pp[2*l + 1] = wasm_i32x4_extract_lane(vi, 2) | (wasm_i32x4_extract_lane(vi, 3) << 4);
+ }
+
+ memcpy(pb, pp, sizeof(pp));
+ pb += bs;
+ }
+#else
+ // scalar
+ quantize_row_q4_0_reference(x, y, k);
+#endif
+}
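+
+// A minimal round-trip sketch (illustrative; buffer sizing assumes QK == 32):
+//
+//   float src[2*QK], out[2*QK];                 // fill src with data first
+//   uint8_t q[2*(sizeof(float) + QK/2)];        // storage for 2 blocks
+//   quantize_row_q4_0(src, q, 2*QK);
+//   dequantize_row_q4_0(q, out, 2*QK);          // defined below
+//   // out[i] approximates src[i] to within roughly d/2 per block scale d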
+
+// method 4
+// blocks of QK elements
+// represented with 2 floats (min + delta) and QK/2 8-bit ints (i.e. QK 4-bit unsigned integer factors)
+void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
+ assert(k % QK == 0);
+
+ const int nb = k / QK;
+ const size_t bs = 2*sizeof(float) + QK/2;
+
+ uint8_t * restrict pd = ((uint8_t *)y + 0*bs);
+ uint8_t * restrict pm = ((uint8_t *)y + 0*bs + sizeof(float));
+ uint8_t * restrict pb = ((uint8_t *)y + 0*bs + 2*sizeof(float));
+
+ uint8_t pp[QK/2];
+
+ for (int i = 0; i < nb; i++) {
+ float min = FLT_MAX;
+ float max = -FLT_MAX;
+
+ for (int l = 0; l < QK; l++) {
+ const float v = x[i*QK + l];
+ if (v < min) min = v;
+ if (v > max) max = v;
+ }
+
+ const float d = (max - min) / ((1 << 4) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ *(float *)pm = min;
+ *(float *)pd = d;
+ pm += bs;
+ pd += bs;
+
+ for (int l = 0; l < QK; l += 2) {
+ const float v0 = (x[i*QK + l + 0] - min)*id;
+ const float v1 = (x[i*QK + l + 1] - min)*id;
+
+ const uint8_t vi0 = round(v0);
+ const uint8_t vi1 = round(v1);
+
+ assert(vi0 >= 0 && vi0 < 16);
+ assert(vi1 >= 0 && vi1 < 16);
+
+ pp[l/2] = vi0 | (vi1 << 4);
+ }
+
+ memcpy(pb, pp, sizeof(pp));
+ pb += bs;
+ }
+}
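+
+// Example of the difference vs q4_0: a block spanning [0.0, 1.0] gets
+// d = 1/7 under q4_0 (half the 16 codes are wasted on negative values), but
+// min = 0.0 and d = 1/15 under q4_1, roughly halving the rounding step on
+// such one-sided data.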
+
+// TODO: vectorize
+void dequantize_row_q4_0(const void * restrict x, float * restrict y, int k) {
+ assert(k % QK == 0);
+
+ const int nb = k / QK;
+ const size_t bs = sizeof(float) + QK/2;
+
+ const uint8_t * restrict pd = ((const uint8_t *)x + 0*bs);
+ const uint8_t * restrict pb = ((const uint8_t *)x + 0*bs + sizeof(float));
+
+#if defined(__AVX2__)
+ for (int i = 0; i < nb; i++) {
+ // scale factor
+ const __m256 d_v = _mm256_broadcast_ss((const float *) (pd + i*bs));
+
+ const uint8_t * restrict pp = pb + i*bs;
+
+ for (int l = 0; l < QK; l += 32) {
+ // Load 32x4-bit integers into 32x8-bit integers
+ __m256i vx8 = bytesFromNibbles(pp+l/2);
+
+ // Subtract 8 from the integers
+ vx8 = _mm256_sub_epi8(vx8, _mm256_set1_epi8(8));
+
+ // Convert to 16-bit int
+ const __m256i vx16_lo = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 0));
+ const __m256i vx16_hi = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 1));
+
+ // Convert to 32-bit int -> float 32
+ const __m256 vf[4] = {
+ _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 0))),
+ _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 1))),
+ _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 0))),
+ _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 1)))
+ };
+
+ // Scale and store
+ for (int j = 0; j < 4; j++) {
+ const __m256 result = _mm256_mul_ps(vf[j], d_v);
+ _mm256_storeu_ps(y + i * QK + l + j*8, result);
+ }
+ }
+ }
+#elif defined(__ARM_NEON)
+ for (int i = 0; i < nb; i++) {
+ const float d = *(const float *) (pd + i*bs);
+
+ const uint8_t * restrict pp = pb + i*bs;
+
+ const float32x4_t vd = vdupq_n_f32(d);
+
+ for (int l = 0; l < QK; l += 16) {
+ // Load 16x4-bit integers into 8x8-bit integers
+ const uint8x8_t v8 = vld1_u8(pp + l/2);
+
+ // Expand 4-bit nibbles to 8-bit bytes
+ const uint8x8_t v0 = vand_u8(v8, vdup_n_u8(0x0f));
+ const uint8x8_t v1 = vshr_n_u8(v8, 4);
+
+ // Convert to signed 8-bit integers
+ const int8x8_t vs_0 = vreinterpret_s8_u8(v0);
+ const int8x8_t vs_1 = vreinterpret_s8_u8(v1);
+
+ // Subtract 8 from each byte
+ const int8x8_t vb_0 = vsub_s8(vs_0, vdup_n_s8(8));
+ const int8x8_t vb_1 = vsub_s8(vs_1, vdup_n_s8(8));
+
+ // Interleave and combine
+ const int8x8_t vx_0 = vzip1_s8(vb_0, vb_1);
+ const int8x8_t vx_1 = vzip2_s8(vb_0, vb_1);
+
+ const int8x16_t vq = vcombine_s8(vx_0, vx_1);
+
+ // convert to 2x int16x8_t
+ const int16x8_t vi_0 = vmovl_s8(vget_low_s8 (vq));
+ const int16x8_t vi_1 = vmovl_s8(vget_high_s8(vq));
+
+ // convert to 4x float32x4_t
+ const float32x4_t vf_0 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vi_0)));
+ const float32x4_t vf_1 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vi_0)));
+ const float32x4_t vf_2 = vcvtq_f32_s32(vmovl_s16(vget_low_s16 (vi_1)));
+ const float32x4_t vf_3 = vcvtq_f32_s32(vmovl_s16(vget_high_s16(vi_1)));
+
+ // Multiply by d
+ const float32x4_t r0 = vmulq_f32(vf_0, vd);
+ const float32x4_t r1 = vmulq_f32(vf_1, vd);
+ const float32x4_t r2 = vmulq_f32(vf_2, vd);
+ const float32x4_t r3 = vmulq_f32(vf_3, vd);
+
+ // Store
+ vst1q_f32(y + i*QK + l + 0, r0);
+ vst1q_f32(y + i*QK + l + 4, r1);
+ vst1q_f32(y + i*QK + l + 8, r2);
+ vst1q_f32(y + i*QK + l + 12, r3);
+ }
+ }
+#else
+ // scalar
+ for (int i = 0; i < nb; i++) {
+ const float d = *(const float *) (pd + i*bs);
+
+ const uint8_t * restrict pp = pb + i*bs;
+
+ for (int l = 0; l < QK; l += 2) {
+ const uint8_t vi = pp[l/2];
+
+ const int8_t vi0 = vi & 0xf;
+ const int8_t vi1 = vi >> 4;
+
+ const float v0 = (vi0 - 8)*d;
+ const float v1 = (vi1 - 8)*d;
+
+ //printf("d = %f, vi = %d, vi0 = %d, vi1 = %d, v0 = %f, v1 = %f\n", d, vi, vi0, vi1, v0, v1);
+
+ y[i*QK + l + 0] = v0;
+ y[i*QK + l + 1] = v1;
+
+ assert(!isnan(y[i*QK + l + 0]));
+ assert(!isnan(y[i*QK + l + 1]));
+ }
+ }
+#endif
+}
+
+void dequantize_row_q4_1(const void * restrict x, float * restrict y, int k) {
+ assert(k % QK == 0);
+
+ const int nb = k / QK;
+ const size_t bs = 2*sizeof(float) + QK/2;
+
+ const uint8_t * restrict pd = ((const uint8_t *)x + 0*bs);
+ const uint8_t * restrict pm = ((const uint8_t *)x + 0*bs + sizeof(float));
+ const uint8_t * restrict pb = ((const uint8_t *)x + 0*bs + 2*sizeof(float));
+
+#if defined(__AVX2__)
+ for (int i = 0; i < nb; i++) {
+ const __m256 d_v = _mm256_broadcast_ss((const float *) (pd + i*bs));
+ const __m256 d_m = _mm256_broadcast_ss((const float *) (pm + i*bs));
+
+ const uint8_t * restrict pp = pb + i*bs;
+
+ for (int l = 0; l < QK; l += 32) {
+ // Load 32x4-bit integers into 32x8-bit integers
+ __m256i vx8 = bytesFromNibbles(pp+l/2);
+
+ // Convert to 16-bit int
+ const __m256i vx16_lo = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 0));
+ const __m256i vx16_hi = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(vx8, 1));
+
+ // Convert to 32-bit int -> float 32
+ const __m256 vf[4] = {
+ _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 0))),
+ _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_lo, 1))),
+ _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 0))),
+ _mm256_cvtepi32_ps(_mm256_cvtepi16_epi32(_mm256_extracti128_si256(vx16_hi, 1)))
+ };
+
+ // Scale, add m and store
+ for (int j = 0; j < 4; j++) {
+ const __m256 result = _mm256_add_ps(_mm256_mul_ps(vf[j], d_v), d_m);
+ _mm256_storeu_ps(y + i * QK + l + j*8, result);
+ }
+ }
+ }
+#else
+ for (int i = 0; i < nb; i++) {
+ const float d = *(const float *) (pd + i*bs);
+ const float m = *(const float *) (pm + i*bs);
+
+ const uint8_t * restrict pp = pb + i*bs;
+
+ for (int l = 0; l < QK; l += 2) {
+ const uint8_t vi = pp[l/2];
+
+ const int8_t vi0 = vi & 0xf;
+ const int8_t vi1 = vi >> 4;
+
+ const float v0 = vi0*d + m;
+ const float v1 = vi1*d + m;
+
+ y[i*QK + l + 0] = v0;
+ y[i*QK + l + 1] = v1;
+
+ assert(!isnan(y[i*QK + l + 0]));
+ assert(!isnan(y[i*QK + l + 1]));
+ }
+ }
+#endif
+}
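+
+// Decoding one packed q4_1 byte by hand: with d = 0.5, m = -1.0 and byte 0xB4,
+// the low nibble 0x4 yields 4*0.5 + (-1.0) = 1.0 and the high nibble 0xB
+// yields 11*0.5 + (-1.0) = 4.5.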
+
//
// simd mappings
//
*s = sumf;
}
+#if __AVX512F__ && QK == 32
+static inline __m512 dot_q4_0_oneblock_avx512(
+ __m512 acc,
+ const uint8_t * pd0,
+ const uint8_t * pd1,
+ const uint8_t * pb0,
+ const uint8_t * pb1,
+ size_t bs,
+ int i
+) {
+ const float * d0_0 = (const float *) (pd0 + i*bs);
+ const float * d1_0 = (const float *) (pd1 + i*bs);
+
+ const uint8_t * restrict p0 = pb0 + (i+0)*bs;
+ const uint8_t * restrict p1 = pb1 + (i+0)*bs;
+
+ // Compute combined scale for the block
+ float scaleScalar = d0_0[0] * d1_0[0];
+ __m512 scale = _mm512_set1_ps( scaleScalar );
+
+ __m256i bx = bytesFromNibbles( p0 );
+ __m256i by = bytesFromNibbles( p1 );
+
+ // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
+ const __m256i off = _mm256_set1_epi8( 8 );
+ bx = _mm256_sub_epi8( bx, off );
+ by = _mm256_sub_epi8( by, off );
+
+ // Sign-extend 32 signed bytes into int16_t
+ __m512i x32 = _mm512_cvtepi8_epi16( bx );
+ __m512i y32 = _mm512_cvtepi8_epi16( by );
+ // Compute products of int16_t integers, add pairwise
+ __m512i i64 = _mm512_madd_epi16( x32, y32 );
+
+ // Convert int32_t to float
+ __m512 p = _mm512_cvtepi32_ps( i64 );
+ // Apply the scale, and accumulate
+ return _mm512_fmadd_ps( scale, p, acc );
+}
+#endif
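+
+// The helper above (and the per-block q4_0 dot-product paths further below)
+// relies on the identity
+//
+//   sum_l x[l]*y[l] = d_x*d_y * sum_l (qx[l] - 8)*(qy[l] - 8)
+//
+// so the inner sum stays in integer arithmetic and a single combined scale
+// d_x*d_y is applied once per block.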
+
inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
ggml_float sumf = 0.0;
*s = sumf;
}
-// compute GGML_VEC_DOT_UNROLL dot products at once
-// xs - x row stride in bytes
-inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
- ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };
-
- ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];
-
- for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
- x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
- }
-
-#if defined(GGML_SIMD)
- const int np = (n & ~(GGML_F16_STEP - 1));
-
- GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };
-
- GGML_F16_VEC ax[GGML_F16_ARR];
- GGML_F16_VEC ay[GGML_F16_ARR];
-
- for (int i = 0; i < np; i += GGML_F16_STEP) {
- for (int j = 0; j < GGML_F16_ARR; j++) {
- ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
-
- for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
- ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);
-
- sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
- }
- }
- }
-
- // reduce sum0..sum3 to sum0
- for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
- GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
- }
-
- // leftovers
- for (int i = np; i < n; ++i) {
- for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
- sumf[j] += GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]);
- }
- }
-#else
- for (int i = 0; i < n; ++i) {
- for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
- sumf[j] += GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]);
- }
- }
-#endif
-
- for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
- s[i] = sumf[i];
- }
-}
-
-inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
-#if defined(GGML_SIMD)
- const int np = (n & ~(GGML_F32_STEP - 1));
-
- GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
-
- GGML_F32_VEC ax[GGML_F32_ARR];
- GGML_F32_VEC ay[GGML_F32_ARR];
-
- for (int i = 0; i < np; i += GGML_F32_STEP) {
- for (int j = 0; j < GGML_F32_ARR; j++) {
- ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
- ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
- ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);
-
- GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
- }
- }
-
- // leftovers
- for (int i = np; i < n; ++i) {
- y[i] += x[i]*v;
- }
-#else
- // scalar
- for (int i = 0; i < n; ++i) {
- y[i] += x[i]*v;
- }
-#endif
-}
-
-inline static void ggml_vec_mad_f16(const int n, ggml_fp16_t * restrict y, ggml_fp16_t * restrict x, const float v) {
-#if defined(GGML_SIMD)
- const int np = (n & ~(GGML_F16_STEP - 1));
-
- GGML_F16_VEC vx = GGML_F16_VEC_SET1(v);
-
- GGML_F16_VEC ax[GGML_F16_ARR];
- GGML_F16_VEC ay[GGML_F16_ARR];
-
- for (int i = 0; i < np; i += GGML_F16_STEP) {
- for (int j = 0; j < GGML_F16_ARR; j++) {
- ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
- ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
- ay[j] = GGML_F16_VEC_FMA(ay[j], ax[j], vx);
-
- GGML_F16_VEC_STORE(y + i + j*GGML_F16_EPR, ay, j);
- }
- }
-
- // leftovers
- for (int i = np; i < n; ++i) {
- GGML_ASSERT(false);
- y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v);
- }
-#else
- for (int i = 0; i < n; ++i) {
- y[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(y[i]) + GGML_FP16_TO_FP32(x[i])*v);
- }
-#endif
-}
-
-//inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
-inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
-#if defined(GGML_SIMD)
- const int np = (n & ~(GGML_F32_STEP - 1));
-
- GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
-
- GGML_F32_VEC ay[GGML_F32_ARR];
-
- for (int i = 0; i < np; i += GGML_F32_STEP) {
- for (int j = 0; j < GGML_F32_ARR; j++) {
- ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
- ay[j] = GGML_F32_VEC_MUL(ay[j], vx);
+inline static void ggml_vec_dot_q4_0(const int n, float * restrict s, const void * restrict x, const void * restrict y) {
+ const int nb = n / QK;
+
+ assert(n % QK == 0);
+ assert(nb % 2 == 0);
+
+ const size_t bs = sizeof(float) + QK/2;
+
+ const uint8_t * restrict pd0 = ((const uint8_t *)x + 0*bs);
+ const uint8_t * restrict pd1 = ((const uint8_t *)y + 0*bs);
+
+ const uint8_t * restrict pb0 = ((const uint8_t *)x + 0*bs + sizeof(float));
+ const uint8_t * restrict pb1 = ((const uint8_t *)y + 0*bs + sizeof(float));
+
+ float sumf = 0.0;
+
+#if defined(__ARM_NEON)
+ float sum0 = 0.0f;
+ float sum1 = 0.0f;
+
+ for (int i = 0; i < nb; i += 2) {
+ const float d0_0 = *(const float *) (pd0 + i*bs);
+ const float d1_0 = *(const float *) (pd1 + i*bs);
+ const float d0_1 = *(const float *) (pd0 + (i + 1)*bs);
+ const float d1_1 = *(const float *) (pd1 + (i + 1)*bs);
+
+ //printf("d0_0: %f, d1_0: %f, d0_1: %f, d1_1: %f\n", d0_0, d1_0, d0_1, d1_1);
+
+ const uint8_t * restrict p0 = pb0 + i*bs;
+ const uint8_t * restrict p1 = pb1 + i*bs;
+
+ const uint8x16_t m4b = vdupq_n_u8(0xf);
+ const int8x16_t s8b = vdupq_n_s8(0x8);
+
+ const uint8x16_t v0_0 = vld1q_u8(p0);
+ const uint8x16_t v1_0 = vld1q_u8(p1);
+ const uint8x16_t v0_1 = vld1q_u8(p0 + bs);
+ const uint8x16_t v1_1 = vld1q_u8(p1 + bs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b));
+ const int8x16_t v1_0l = vreinterpretq_s8_u8(vandq_u8(v1_0, m4b));
+
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v1_0h = vreinterpretq_s8_u8(vshrq_n_u8(v1_0, 4));
+
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8(v0_1, m4b));
+ const int8x16_t v1_1l = vreinterpretq_s8_u8(vandq_u8(v1_1, m4b));
+
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+ const int8x16_t v1_1h = vreinterpretq_s8_u8(vshrq_n_u8(v1_1, 4));
+
+ // sub 8
+ const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
+ const int8x16_t v1_0ls = vsubq_s8(v1_0l, s8b);
+
+ const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
+ const int8x16_t v1_0hs = vsubq_s8(v1_0h, s8b);
+
+ const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
+ const int8x16_t v1_1ls = vsubq_s8(v1_1l, s8b);
+
+ const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
+ const int8x16_t v1_1hs = vsubq_s8(v1_1h, s8b);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ // dot product into int32x4_t
+ int32x4_t p_0 = vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0ls);
+ int32x4_t p_1 = vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1ls);
+
+ p_0 = vdotq_s32(p_0, v0_0hs, v1_0hs);
+ p_1 = vdotq_s32(p_1, v0_1hs, v1_1hs);
+
+ // scalar
+#if defined(__ARM_FEATURE_QRDMX)
+ sum0 += d0_0*d1_0*vaddvq_s32(p_0);
+ sum1 += d0_1*d1_1*vaddvq_s32(p_1);
+#else
+ sum0 += d0_0*d1_0*(vgetq_lane_s32(p_0, 0) + vgetq_lane_s32(p_0, 1) + vgetq_lane_s32(p_0, 2) + vgetq_lane_s32(p_0, 3));
+ sum1 += d0_1*d1_1*(vgetq_lane_s32(p_1, 0) + vgetq_lane_s32(p_1, 1) + vgetq_lane_s32(p_1, 2) + vgetq_lane_s32(p_1, 3));
+#endif
+#else
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0ls));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0ls));
+
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0hs));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0hs));
+
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1ls));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1ls));
+
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1hs));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1hs));
+
+ const int16x8_t pl_0 = vaddq_s16(pl0l, pl0h);
+ const int16x8_t ph_0 = vaddq_s16(ph0l, ph0h);
+
+ const int16x8_t pl_1 = vaddq_s16(pl1l, pl1h);
+ const int16x8_t ph_1 = vaddq_s16(ph1l, ph1h);
+
+ const int16x8_t p_0 = vaddq_s16(pl_0, ph_0);
+ const int16x8_t p_1 = vaddq_s16(pl_1, ph_1);
+
+ // scalar
+#if defined(__ARM_FEATURE_QRDMX)
+ sum0 += d0_0*d1_0*vaddvq_s16(p_0);
+ sum1 += d0_1*d1_1*vaddvq_s16(p_1);
+#else
+ sum0 += d0_0*d1_0*(vgetq_lane_s16(p_0, 0) + vgetq_lane_s16(p_0, 1) + vgetq_lane_s16(p_0, 2) + vgetq_lane_s16(p_0, 3) + vgetq_lane_s16(p_0, 4) + vgetq_lane_s16(p_0, 5) + vgetq_lane_s16(p_0, 6) + vgetq_lane_s16(p_0, 7));
+ sum1 += d0_1*d1_1*(vgetq_lane_s16(p_1, 0) + vgetq_lane_s16(p_1, 1) + vgetq_lane_s16(p_1, 2) + vgetq_lane_s16(p_1, 3) + vgetq_lane_s16(p_1, 4) + vgetq_lane_s16(p_1, 5) + vgetq_lane_s16(p_1, 6) + vgetq_lane_s16(p_1, 7));
+#endif
+#endif
+ }
+
+ sumf = sum0 + sum1;
+#elif defined(__AVX512F__)
+ // Initialize accumulator with zeros
+ __m512 acc0 = _mm512_setzero_ps();
+ __m512 acc1 = _mm512_setzero_ps();
+
+ const int superblock_size = 8;
+ const int superblock_count = nb / superblock_size;
+ const int remainder = nb % superblock_size;
+
+ for (int superblock_ix = 0; superblock_ix < superblock_count; superblock_ix += 1) {
+ int i = superblock_ix * superblock_size;
+
+ acc0 = dot_q4_0_oneblock_avx512( acc0, pd0, pd1, pb0, pb1, bs, i+0 );
+ acc1 = dot_q4_0_oneblock_avx512( acc1, pd0, pd1, pb0, pb1, bs, i+1 );
+ acc0 = dot_q4_0_oneblock_avx512( acc0, pd0, pd1, pb0, pb1, bs, i+2 );
+ acc1 = dot_q4_0_oneblock_avx512( acc1, pd0, pd1, pb0, pb1, bs, i+3 );
+ acc0 = dot_q4_0_oneblock_avx512( acc0, pd0, pd1, pb0, pb1, bs, i+4 );
+ acc1 = dot_q4_0_oneblock_avx512( acc1, pd0, pd1, pb0, pb1, bs, i+5 );
+ acc0 = dot_q4_0_oneblock_avx512( acc0, pd0, pd1, pb0, pb1, bs, i+6 );
+ acc1 = dot_q4_0_oneblock_avx512( acc1, pd0, pd1, pb0, pb1, bs, i+7 );
+ }
+
+ // Remainders
+ for (int i = superblock_count * superblock_size; i < nb; ++i) {
+ acc0 = dot_q4_0_oneblock_avx512( acc0, pd0, pd1, pb0, pb1, bs, i );
+ }
+
+ // Horizontal sum of all lanes of the accumulator
+ sumf = _mm512_reduce_add_ps( acc0 ) + _mm512_reduce_add_ps( acc1 );
+#elif defined(__AVX2__)
+ const size_t countBlocks = nb;
+
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ // Main loop
+ for (int i = 0; i < nb; ++i) {
+ const float * d0_0 = (const float *) (pd0 + i*bs);
+ const float * d1_0 = (const float *) (pd1 + i*bs);
+
+ const uint8_t * restrict p0 = pb0 + i*bs;
+ const uint8_t * restrict p1 = pb1 + i*bs;
+
+ // Compute combined scale for the block
+ const __m256 scale = _mm256_mul_ps( _mm256_broadcast_ss( d0_0 ), _mm256_broadcast_ss( d1_0 ) );
+
+ // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
+ __m256i bx = bytesFromNibbles( p0 );
+ __m256i by = bytesFromNibbles( p1 );
+
+ // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
+ const __m256i off = _mm256_set1_epi8( 8 );
+ bx = _mm256_sub_epi8( bx, off );
+ by = _mm256_sub_epi8( by, off );
+
+ // Sign-extend first 16 signed bytes into int16_t
+ __m256i x16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( bx ) );
+ __m256i y16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( by ) );
+ // Compute products of int16_t integers, add pairwise
+ __m256i i32 = _mm256_madd_epi16( x16, y16 );
+
+ // Sign-extend last 16 signed bytes into int16_t vectors
+ x16 = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( bx, 1 ) );
+ y16 = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( by, 1 ) );
+ // Accumulate products of int16_t integers
+ i32 = _mm256_add_epi32( i32, _mm256_madd_epi16( x16, y16 ) );
+
+ // Convert int32_t to float
+ __m256 p = _mm256_cvtepi32_ps( i32 );
+ // Apply the scale, and accumulate
+ acc = _mm256_fmadd_ps( scale, p, acc );
+ }
+
+ // Return horizontal sum of the acc vector
+ __m128 res = _mm256_extractf128_ps( acc, 1 );
+ res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) );
+ res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
+ res = _mm_add_ss( res, _mm_movehdup_ps( res ) );
+
+ sumf = _mm_cvtss_f32( res );
+#elif defined(__wasm_simd128__)
+ // wasm simd
+ float sum0 = 0.0f;
+ float sum1 = 0.0f;
+
+ for (int i = 0; i < nb; i += 2) {
+ const float d0_0 = *(const float *) (pd0 + i*bs);
+ const float d1_0 = *(const float *) (pd1 + i*bs);
+ const float d0_1 = *(const float *) (pd0 + (i + 1)*bs);
+ const float d1_1 = *(const float *) (pd1 + (i + 1)*bs);
+
+ const uint8_t * restrict p0 = pb0 + i*bs;
+ const uint8_t * restrict p1 = pb1 + i*bs;
+
+ const v128_t m4b = wasm_u8x16_splat(0xf);
+ const v128_t s8b = wasm_i8x16_splat(0x8);
+
+ const v128_t v0_0 = wasm_v128_load(p0);
+ const v128_t v0_1 = wasm_v128_load(p0 + bs);
+ const v128_t v1_0 = wasm_v128_load(p1);
+ const v128_t v1_1 = wasm_v128_load(p1 + bs);
+
+ // 4-bit -> 8-bit
+ const v128_t v0_0l = wasm_v128_and(v0_0, m4b);
+ const v128_t v1_0l = wasm_v128_and(v1_0, m4b);
+
+ const v128_t v0_0h = wasm_u8x16_shr(v0_0, 4);
+ const v128_t v1_0h = wasm_u8x16_shr(v1_0, 4);
+
+ const v128_t v0_1l = wasm_v128_and(v0_1, m4b);
+ const v128_t v1_1l = wasm_v128_and(v1_1, m4b);
+
+ const v128_t v0_1h = wasm_u8x16_shr(v0_1, 4);
+ const v128_t v1_1h = wasm_u8x16_shr(v1_1, 4);
+
+ // sub 8
+ const v128_t v0_0ls = wasm_i8x16_sub(v0_0l, s8b);
+ const v128_t v1_0ls = wasm_i8x16_sub(v1_0l, s8b);
+
+ const v128_t v0_0hs = wasm_i8x16_sub(v0_0h, s8b);
+ const v128_t v1_0hs = wasm_i8x16_sub(v1_0h, s8b);
+
+ const v128_t v0_1ls = wasm_i8x16_sub(v0_1l, s8b);
+ const v128_t v1_1ls = wasm_i8x16_sub(v1_1l, s8b);
+
+ const v128_t v0_1hs = wasm_i8x16_sub(v0_1h, s8b);
+ const v128_t v1_1hs = wasm_i8x16_sub(v1_1h, s8b);
+
+ // dot product into int16x8_t
+ const v128_t pl0l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_0ls), wasm_i16x8_extend_low_i8x16(v1_0ls));
+ const v128_t pl0h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_0ls), wasm_i16x8_extend_high_i8x16(v1_0ls));
+
+ const v128_t ph0l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_0hs), wasm_i16x8_extend_low_i8x16(v1_0hs));
+ const v128_t ph0h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_0hs), wasm_i16x8_extend_high_i8x16(v1_0hs));
+
+ const v128_t pl1l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_1ls), wasm_i16x8_extend_low_i8x16(v1_1ls));
+ const v128_t pl1h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_1ls), wasm_i16x8_extend_high_i8x16(v1_1ls));
+
+ const v128_t ph1l = wasm_i16x8_mul(wasm_i16x8_extend_low_i8x16(v0_1hs), wasm_i16x8_extend_low_i8x16(v1_1hs));
+ const v128_t ph1h = wasm_i16x8_mul(wasm_i16x8_extend_high_i8x16(v0_1hs), wasm_i16x8_extend_high_i8x16(v1_1hs));
+
+ const v128_t pl_0 = wasm_i16x8_add(pl0l, pl0h);
+ const v128_t ph_0 = wasm_i16x8_add(ph0l, ph0h);
+
+ const v128_t pl_1 = wasm_i16x8_add(pl1l, pl1h);
+ const v128_t ph_1 = wasm_i16x8_add(ph1l, ph1h);
+
+ const v128_t p_0 = wasm_i16x8_add(pl_0, ph_0);
+ const v128_t p_1 = wasm_i16x8_add(pl_1, ph_1);
+
+ sum0 += d0_0*d1_0*(
+ wasm_i16x8_extract_lane(p_0, 0) + wasm_i16x8_extract_lane(p_0, 1) +
+ wasm_i16x8_extract_lane(p_0, 2) + wasm_i16x8_extract_lane(p_0, 3) +
+ wasm_i16x8_extract_lane(p_0, 4) + wasm_i16x8_extract_lane(p_0, 5) +
+ wasm_i16x8_extract_lane(p_0, 6) + wasm_i16x8_extract_lane(p_0, 7));
+ sum1 += d0_1*d1_1*(
+ wasm_i16x8_extract_lane(p_1, 0) + wasm_i16x8_extract_lane(p_1, 1) +
+ wasm_i16x8_extract_lane(p_1, 2) + wasm_i16x8_extract_lane(p_1, 3) +
+ wasm_i16x8_extract_lane(p_1, 4) + wasm_i16x8_extract_lane(p_1, 5) +
+ wasm_i16x8_extract_lane(p_1, 6) + wasm_i16x8_extract_lane(p_1, 7));
+ }
+
+ sumf = sum0 + sum1;
+#else
+ // scalar
+ for (int i = 0; i < nb; i++) {
+ const float d0 = *(const float *) (pd0 + i*bs);
+ const float d1 = *(const float *) (pd1 + i*bs);
+
+ const uint8_t * restrict p0 = pb0 + i*bs;
+ const uint8_t * restrict p1 = pb1 + i*bs;
+
+ for (int j = 0; j < QK/2; j++) {
+ const uint8_t v0 = p0[j];
+ const uint8_t v1 = p1[j];
+
+ const float f0 = d0*((int8_t) (v0 & 0xf) - 8);
+ const float f1 = d0*((int8_t) (v0 >> 4) - 8);
+
+ const float f2 = d1*((int8_t) (v1 & 0xf) - 8);
+ const float f3 = d1*((int8_t) (v1 >> 4) - 8);
+
+ sumf += f0*f2 + f1*f3;
+ }
+ }
+#endif
+
+ *s = sumf;
+}
+
+inline static void ggml_vec_dot_q4_1(const int n, float * restrict s, const void * restrict x, const void * restrict y) {
+ const int nb = n / QK;
+
+ const size_t bs = 2*sizeof(float) + QK/2;
+
+ const uint8_t * restrict pd0 = ((const uint8_t *)x + 0*bs);
+ const uint8_t * restrict pd1 = ((const uint8_t *)y + 0*bs);
+
+ const uint8_t * restrict pm0 = ((const uint8_t *)x + 0*bs + sizeof(float));
+ const uint8_t * restrict pm1 = ((const uint8_t *)y + 0*bs + sizeof(float));
+
+ const uint8_t * restrict pb0 = ((const uint8_t *)x + 0*bs + 2*sizeof(float));
+ const uint8_t * restrict pb1 = ((const uint8_t *)y + 0*bs + 2*sizeof(float));
+
+ float sumf = 0.0;
+
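+ // Expanding (d0*qx + m0)*(d1*qy + m1) and summing over a block gives
+ //   d0*d1*sum(qx*qy) + d0*m1*sum(qx) + d1*m0*sum(qy) + QK*m0*m1
+ // which is the term-by-term accumulation done by the AVX2 path below.
+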
+#if defined(__AVX2__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+ // Accumulator for constant offsets
+ float acc_offset = 0.0f;
+
+ // Main loop
+ for (int i = 0; i < nb; ++i) {
+ const float * m0 = (const float *) (pm0 + i*bs);
+ const float * m1 = (const float *) (pm1 + i*bs);
+
+ const float * d0 = (const float *) (pd0 + i*bs);
+ const float * d1 = (const float *) (pd1 + i*bs);
+
+ const uint8_t * restrict p0 = pb0 + i*bs;
+ const uint8_t * restrict p1 = pb1 + i*bs;
+
+ const __m256 d0v = _mm256_broadcast_ss( d0 );
+ const __m256 d1v = _mm256_broadcast_ss( d1 );
+ const __m256 m0v = _mm256_broadcast_ss( m0 );
+ const __m256 m1v = _mm256_broadcast_ss( m1 );
+
+ // Compute combined scale for the block
+ const __m256 scale_01 = _mm256_mul_ps( d0v, d1v );
+
+ // Compute cross scales for the block
+ const __m256 scale_0 = _mm256_mul_ps( d0v, m1v );
+ const __m256 scale_1 = _mm256_mul_ps( m0v, d1v );
+ const __m256 cross_scales = _mm256_blend_ps( scale_0, scale_1, 0b10101010 );
+
+ // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
+ __m256i bx = bytesFromNibbles( p0 );
+ __m256i by = bytesFromNibbles( p1 );
+
+ // Now we have a vector with bytes in [ 0 .. 15 ] interval.
+
+ // Sign-extend first 16 signed bytes into int16_t
+ __m256i x16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( bx ) );
+ __m256i y16 = _mm256_cvtepi8_epi16( _mm256_castsi256_si128( by ) );
+ // Compute products of int16_t integers, add pairwise
+ __m256i i32 = _mm256_madd_epi16( x16, y16 );
+
+ // Sign-extend last 16 signed bytes into int16_t vectors
+ __m256i x16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( bx, 1 ) );
+ __m256i y16_h = _mm256_cvtepi8_epi16( _mm256_extracti128_si256( by, 1 ) );
+ // Accumulate products of int16_t integers
+ i32 = _mm256_add_epi32( i32, _mm256_madd_epi16( x16_h, y16_h ) );
+
+ // compute sums of unsigned bytes in bx, by in blocks of 8.
+ // This results in a layout like X100 0000 X200 0000 X300 0000 X400 0000,
+ // which we then interleave as X100 Y100 X200 Y200 X300 Y300 X400 Y400.
+ // so if we then cast to 8 singles, we get 8 floats like [ x0_7, y0_7, x8_15, y8_15, x16_23, y16_23, x24_31, y24_31 ]
+ __m256i xsumi = _mm256_sad_epu8( bx, _mm256_setzero_si256() );
+ __m256i ysumi = _mm256_sad_epu8( by, _mm256_setzero_si256() );
+ __m256i sumsi = _mm256_or_si256( xsumi, _mm256_slli_si256( ysumi, 4 ) );
+ __m256 sums = _mm256_cvtepi32_ps( sumsi );
+
+ // Convert int32_t to float
+ __m256 p = _mm256_cvtepi32_ps( i32 );
+ // Apply the scale, and accumulate
+ // acc += d0*d1*x*y + d0*m1*x + d1*m0*y
+ acc = _mm256_fmadd_ps( scale_01, p, acc );
+ acc = _mm256_fmadd_ps( cross_scales, sums, acc );
+ // acc_offset += m0*m1 (for each entry in the block)
+ acc_offset += (*m0)*(*m1);
+ }
+
+ // Return horizontal sum of the acc vector
+ __m128 res = _mm256_extractf128_ps( acc, 1 );
+ res = _mm_add_ps( res, _mm256_castps256_ps128( acc ) );
+ res = _mm_add_ps( res, _mm_movehl_ps( res, res ) );
+ res = _mm_add_ss( res, _mm_movehdup_ps( res ) );
+
+ sumf = _mm_cvtss_f32( res ) + acc_offset * QK;
+#else
+ // scalar
+ for (int i = 0; i < nb; i++) {
+ const float m0 = *(const float *) (pm0 + i*bs);
+ const float m1 = *(const float *) (pm1 + i*bs);
+
+ const float d0 = *(const float *) (pd0 + i*bs);
+ const float d1 = *(const float *) (pd1 + i*bs);
+
+ const uint8_t * restrict p0 = pb0 + i*bs;
+ const uint8_t * restrict p1 = pb1 + i*bs;
+
+ for (int j = 0; j < QK/2; j++) {
+ const uint8_t v0 = p0[j];
+ const uint8_t v1 = p1[j];
+
+ const float f0 = d0*(v0 & 0xf) + m0;
+ const float f1 = d0*(v0 >> 4) + m0;
+
+ const float f2 = d1*(v1 & 0xf) + m1;
+ const float f3 = d1*(v1 >> 4) + m1;
+
+ sumf += f0*f2 + f1*f3;
+ }
+ }
+#endif
+
+ *s = sumf;
+}
+
+// compute GGML_VEC_DOT_UNROLL dot products at once
+// xs - x row stride in bytes
+inline static void ggml_vec_dot_f16_unroll(const int n, const int xs, float * restrict s, void * restrict xv, ggml_fp16_t * restrict y) {
+ ggml_float sumf[GGML_VEC_DOT_UNROLL] = { 0.0 };
+
+ ggml_fp16_t * restrict x[GGML_VEC_DOT_UNROLL];
+
+ for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
+ x[i] = (ggml_fp16_t *) ((char *) xv + i*xs);
+ }
+
+#if defined(GGML_SIMD)
+ const int np = (n & ~(GGML_F16_STEP - 1));
+
+ GGML_F16_VEC sum[GGML_VEC_DOT_UNROLL][GGML_F16_ARR] = { { GGML_F16_VEC_ZERO } };
+
+ GGML_F16_VEC ax[GGML_F16_ARR];
+ GGML_F16_VEC ay[GGML_F16_ARR];
+
+ for (int i = 0; i < np; i += GGML_F16_STEP) {
+ for (int j = 0; j < GGML_F16_ARR; j++) {
+ ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
+
+ for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
+ ax[j] = GGML_F16_VEC_LOAD(x[k] + i + j*GGML_F16_EPR, j);
+
+ sum[k][j] = GGML_F16_VEC_FMA(sum[k][j], ax[j], ay[j]);
+ }
+ }
+ }
+
+ // reduce sum0..sum3 to sum0
+ for (int k = 0; k < GGML_VEC_DOT_UNROLL; ++k) {
+ GGML_F16_VEC_REDUCE(sumf[k], sum[k]);
+ }
+
+ // leftovers
+ for (int i = np; i < n; ++i) {
+ for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
+ sumf[j] += GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]);
+ }
+ }
+#else
+ for (int i = 0; i < n; ++i) {
+ for (int j = 0; j < GGML_VEC_DOT_UNROLL; ++j) {
+ sumf[j] += GGML_FP16_TO_FP32(x[j][i])*GGML_FP16_TO_FP32(y[i]);
+ }
+ }
+#endif
+
+ for (int i = 0; i < GGML_VEC_DOT_UNROLL; ++i) {
+ s[i] = sumf[i];
+ }
+}
+
+inline static void ggml_vec_mad_f32(const int n, float * restrict y, const float * restrict x, const float v) {
+#if defined(GGML_SIMD)
+ const int np = (n & ~(GGML_F32_STEP - 1));
+
+ GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
+
+ GGML_F32_VEC ax[GGML_F32_ARR];
+ GGML_F32_VEC ay[GGML_F32_ARR];
+
+ for (int i = 0; i < np; i += GGML_F32_STEP) {
+ for (int j = 0; j < GGML_F32_ARR; j++) {
+ ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
+ ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
+ ay[j] = GGML_F32_VEC_FMA(ay[j], ax[j], vx);
+
+ GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
+ }
+ }
+
+ // leftovers
+ for (int i = np; i < n; ++i) {
+ y[i] += x[i]*v;
+ }
+#else
+ // scalar
+ for (int i = 0; i < n; ++i) {
+ y[i] += x[i]*v;
+ }
+#endif
+}
+
+//inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
+inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
+#if defined(GGML_SIMD)
+ const int np = (n & ~(GGML_F32_STEP - 1));
+
+ GGML_F32_VEC vx = GGML_F32_VEC_SET1(v);
+
+ GGML_F32_VEC ay[GGML_F32_ARR];
+
+ for (int i = 0; i < np; i += GGML_F32_STEP) {
+ for (int j = 0; j < GGML_F32_ARR; j++) {
+ ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
+ ay[j] = GGML_F32_VEC_MUL(ay[j], vx);
GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
}
}
#endif
+// Sigmoid Linear Unit (SiLU) function
+inline static float ggml_silu_f32(float x) {
+ return x/(1.0f + expf(-x));
+}
+
+inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
+ const uint16_t * i16 = (const uint16_t *) x;
+ for (int i = 0; i < n; ++i) {
+ y[i] = table_silu_f16[i16[i]];
+ }
+}
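+
+// the table lookup above indexes directly by the raw 16-bit pattern of the
+// fp16 input, so every one of the 65536 possible half values maps to one
+// precomputed fp16 result (see the table initialization in ggml_init)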
+
+#ifdef GGML_SILU_FP16
+inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
+ uint16_t t;
+ for (int i = 0; i < n; ++i) {
+ ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
+ memcpy(&t, &fp16, sizeof(uint16_t));
+ y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]);
+ }
+}
+#else
+inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) {
+ for (int i = 0; i < n; ++i) {
+ y[i] = ggml_silu_f32(x[i]);
+ }
+}
+#endif
+
inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) {
#ifndef GGML_USE_ACCELERATE
ggml_float sum = 0.0;
// data types
//
+static const int GGML_BLCK_SIZE[GGML_TYPE_COUNT] = {
+ QK,
+ QK,
+ 1,
+ 1,
+ 1,
+ 1,
+ 1,
+};
+
+static_assert(GGML_TYPE_COUNT == 7, "GGML_TYPE_COUNT != 7");
+
static const size_t GGML_TYPE_SIZE[GGML_TYPE_COUNT] = {
+ sizeof(float ) + QK/2,
+ sizeof(float )*2 + QK/2,
sizeof(int8_t ),
sizeof(int16_t),
sizeof(int32_t),
sizeof(ggml_fp16_t),
sizeof(float  ),
};
+// don't forget to update the array above when adding new types
+static_assert(GGML_TYPE_COUNT == 7, "GGML_TYPE_COUNT != 7");
+
static const char * GGML_OP_LABEL[GGML_OP_COUNT] = {
"NONE",
"STEP",
"RELU",
"GELU",
+ "SILU",
"NORM",
+ "RMS_NORM",
"MUL_MAT",
"FLASH_FF",
};
+static_assert(GGML_OP_COUNT == 35, "GGML_OP_COUNT != 35");
+
static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
"none",
"step(x)",
"relu(x)",
"gelu(x)",
+ "silu(x)",
"norm(x)",
+ "rms_norm(x)",
"X*Y",
"flash_ff(x)",
};
+static_assert(GGML_OP_COUNT == 35, "GGML_OP_COUNT != 35");
+
//
// ggml object
//
size_t mem_size;
void * mem_buffer;
bool mem_buffer_owned;
+ bool mem_buffer_mlocked;
int n_objects;
size_t ggml_nbytes(const struct ggml_tensor * tensor) {
static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
- return ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type];
+ return (ggml_nelements(tensor)*GGML_TYPE_SIZE[tensor->type])/GGML_BLCK_SIZE[tensor->type];
+}
+
+int ggml_blck_size(enum ggml_type type) {
+ return GGML_BLCK_SIZE[type];
}
size_t ggml_type_size(enum ggml_type type) {
return GGML_TYPE_SIZE[type];
}
+float ggml_type_sizef(enum ggml_type type) {
+ return ((float)(GGML_TYPE_SIZE[type]))/GGML_BLCK_SIZE[type];
+}
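+// e.g. ggml_type_sizef(GGML_TYPE_Q4_0) = 20.0f/32 = 0.625 bytes per element,
+// so a row of 4096 elements takes 4096*0.625 = 2560 bytes (16384 bytes as f32)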
+
size_t ggml_element_size(const struct ggml_tensor * tensor) {
return GGML_TYPE_SIZE[tensor->type];
}
static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
return
- (t0->ne[0] == t1->ne[0]) &&
- (t0->ne[2] == t1->ne[2]) &&
- (t0->ne[3] == t1->ne[3]);
+ (t0->ne[0] == t1->ne[0]) &&
+ (t0->ne[2] == t1->ne[2]) &&
+ (t0->ne[3] == t1->ne[3]);
+}
+
+static inline bool ggml_is_transposed(const struct ggml_tensor * tensor) {
+ return tensor->nb[0] > tensor->nb[1];
}
static inline bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
return
tensor->nb[0] == GGML_TYPE_SIZE[tensor->type] &&
- tensor->nb[1] == tensor->nb[0]*tensor->ne[0] &&
+ tensor->nb[1] == (tensor->nb[0]*tensor->ne[0])/GGML_BLCK_SIZE[tensor->type] &&
tensor->nb[2] == tensor->nb[1]*tensor->ne[1] &&
tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}
// assert that pointer is aligned to GGML_MEM_ALIGN
#define ggml_assert_aligned(ptr) \
- assert(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)
+ GGML_ASSERT(((uintptr_t) (ptr))%GGML_MEM_ALIGN == 0)
////////////////////////////////////////////////////////////////////////////////
static bool is_first_call = true;
if (is_first_call) {
- // initialize GELU, EXP and F32 tables
+ // initialize GELU, SILU, EXP and F32 tables
{
const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
memcpy(&ii, &ui, sizeof(ii));
const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
+ table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
table_exp_f16[i] = GGML_FP32_TO_FP16(exp(f));
}
const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
- GGML_PRINT_DEBUG("%s: GELU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
+ GGML_PRINT_DEBUG("%s: GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0f);
}
// initialize g_state
}
*ctx = (struct ggml_context) {
- /*.mem_size =*/ params.mem_size,
- /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : malloc(params.mem_size),
- /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
- /*.n_objects =*/ 0,
- /*.objects_begin =*/ NULL,
- /*.objects_end =*/ NULL,
- /*.scratch =*/ { 0, 0, NULL, },
- /*.scratch_save =*/ { 0, 0, NULL, },
+ /*.mem_size =*/ params.mem_size,
+ /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : malloc(params.mem_size),
+ /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
+ /*.mem_buffer_mlocked =*/ false,
+ /*.n_objects =*/ 0,
+ /*.objects_begin =*/ NULL,
+ /*.objects_end =*/ NULL,
+ /*.scratch =*/ { 0, 0, NULL, },
+ /*.scratch_save =*/ { 0, 0, NULL, },
};
+ GGML_ASSERT(ctx->mem_buffer != NULL); // check for allocation failure
+
ggml_assert_aligned(ctx->mem_buffer);
GGML_PRINT_DEBUG("%s: context initialized\n", __func__);
GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n",
__func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size);
+#if GGML_MLOCK_SUPPORT
+ if (ctx->mem_buffer_mlocked) {
+ if (munlock(ctx->mem_buffer, ctx->mem_size)) {
+ fprintf(stderr, "%s: failed to munlock buffer: %s\n", __func__, strerror(errno));
+ }
+ }
+#endif
+
if (ctx->mem_buffer_owned) {
free(ctx->mem_buffer);
}
return result;
}
+bool ggml_mlock_supported(void) {
+ return GGML_MLOCK_SUPPORT;
+}
+
+#if GGML_MLOCK_SUPPORT
+#ifdef __APPLE__
+ #define MLOCK_SUGGESTION "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or\n" \
+ "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l)."
+#else
+ #define MLOCK_SUGGESTION "Try increasing RLIMIT_MLOCK (ulimit -l)."
+#endif
+bool ggml_mlock(struct ggml_context * ctx, char ** err_p) {
+ if (ctx->mem_buffer_mlocked) {
+ return true;
+ }
+ if (mlock(ctx->mem_buffer, ctx->mem_size)) {
+ int ret = asprintf(err_p, "failed to mlock %zu-byte buffer: %s\n" MLOCK_SUGGESTION,
+ ctx->mem_size, strerror(errno));
+ GGML_ASSERT(ret >= 0);
+ return false;
+ }
+ ctx->mem_buffer_mlocked = true;
+ return true;
+}
+#else // GGML_MLOCK_SUPPORT
+bool ggml_mlock(struct ggml_context * ctx, char ** err_p) {
+ *err_p = strdup("can't mlock because it's not supported on this system");
+ return false;
+}
+#endif // GGML_MLOCK_SUPPORT
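+
+// Hypothetical caller sketch (the variable names are illustrative, not part of
+// this patch); on failure the message is heap-allocated and owned by the caller:
+//
+//   char * err = NULL;
+//   if (!ggml_mlock(ctx, &err)) {
+//       fprintf(stderr, "warning: %s\n", err);
+//       free(err);
+//   }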
+
////////////////////////////////////////////////////////////////////////////////
struct ggml_tensor * ggml_new_tensor_impl(
size_t size_needed = 0;
if (data == NULL) {
- size_needed += GGML_TYPE_SIZE[type];
- for (int i = 0; i < n_dims; i++) {
+ size_needed += GGML_TYPE_SIZE[type]*(ne[0]/GGML_BLCK_SIZE[type]);
+ for (int i = 1; i < n_dims; i++) {
size_needed *= ne[i];
}
// align to GGML_MEM_ALIGN
}
result->nb[0] = GGML_TYPE_SIZE[type];
- for (int i = 1; i < GGML_MAX_DIMS; i++) {
+ result->nb[1] = result->nb[0]*(result->ne[0]/GGML_BLCK_SIZE[type]);
+ for (int i = 2; i < GGML_MAX_DIMS; i++) {
result->nb[i] = result->nb[i - 1]*result->ne[i - 1];
}
char * const data = tensor->data;
switch (tensor->type) {
+ case GGML_TYPE_Q4_0:
+ {
+ GGML_ASSERT(false);
+ } break;
+ case GGML_TYPE_Q4_1:
+ {
+ GGML_ASSERT(false);
+ } break;
case GGML_TYPE_I8:
{
assert(tensor->nb[0] == sizeof(int8_t));
} break;
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
char * const data = tensor->data;
switch (tensor->type) {
+ case GGML_TYPE_Q4_0:
+ {
+ GGML_ASSERT(false);
+ } break;
+ case GGML_TYPE_Q4_1:
+ {
+ GGML_ASSERT(false);
+ } break;
case GGML_TYPE_I8:
{
assert(tensor->nb[0] == sizeof(int8_t));
} break;
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
switch (tensor->type) {
+ case GGML_TYPE_Q4_0:
+ {
+ GGML_ASSERT(false);
+ } break;
+ case GGML_TYPE_Q4_1:
+ {
+ GGML_ASSERT(false);
+ } break;
case GGML_TYPE_I8:
{
GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
switch (tensor->type) {
+ case GGML_TYPE_Q4_0:
+ {
+ GGML_ASSERT(false);
+ } break;
+ case GGML_TYPE_Q4_1:
+ {
+ GGML_ASSERT(false);
+ } break;
case GGML_TYPE_I8:
{
GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
switch (tensor->type) {
+ case GGML_TYPE_Q4_0:
+ {
+ GGML_ASSERT(false);
+ } break;
+ case GGML_TYPE_Q4_1:
+ {
+ GGML_ASSERT(false);
+ } break;
case GGML_TYPE_I8:
{
GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
switch (tensor->type) {
- case GGML_TYPE_I8:
+ case GGML_TYPE_Q4_0:
{
- GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
+ GGML_ASSERT(false);
+ } break;
+ case GGML_TYPE_Q4_1:
+ {
+ GGML_ASSERT(false);
+ } break;
+ case GGML_TYPE_I8:
+ {
+ GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
((int8_t *)(tensor->data))[i] = value;
} break;
case GGML_TYPE_I16:
struct ggml_tensor * a,
struct ggml_tensor * b,
bool inplace) {
- assert(ggml_are_same_shape(a, b));
+ GGML_ASSERT(ggml_are_same_shape(a, b));
bool is_node = false;
struct ggml_tensor * a,
struct ggml_tensor * b,
bool inplace) {
- assert(ggml_are_same_shape(a, b));
+ GGML_ASSERT(ggml_are_same_shape(a, b));
bool is_node = false;
struct ggml_tensor * a,
struct ggml_tensor * b,
bool inplace) {
- assert(ggml_are_same_shape(a, b));
+ GGML_ASSERT(ggml_are_same_shape(a, b));
bool is_node = false;
}
if (inplace) {
- assert(is_node == false);
+ GGML_ASSERT(is_node == false);
}
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
struct ggml_tensor * a,
struct ggml_tensor * b,
bool inplace) {
- assert(ggml_are_same_shape(a, b));
+ GGML_ASSERT(ggml_are_same_shape(a, b));
bool is_node = false;
}
if (inplace) {
- assert(is_node == false);
+ GGML_ASSERT(is_node == false);
}
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
bool is_node = false;
if (a->grad) {
- assert(false); // TODO: implement
+ GGML_ASSERT(false); // TODO: implement
is_node = true;
}
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b) {
- assert(ggml_can_repeat(a, b));
+ GGML_ASSERT(ggml_can_repeat(a, b));
bool is_node = false;
return ggml_gelu_impl(ctx, a, true);
}
+// ggml_silu
+
+struct ggml_tensor * ggml_silu_impl(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ bool inplace) {
+ bool is_node = false;
+
+ if (!inplace && (a->grad)) {
+ is_node = true;
+ }
+
+ struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
+
+ result->op = GGML_OP_SILU;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = NULL;
+
+ return result;
+}
+
+struct ggml_tensor * ggml_silu(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_silu_impl(ctx, a, false);
+}
+
+struct ggml_tensor * ggml_silu_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_silu_impl(ctx, a, true);
+}
+
// ggml_norm
struct ggml_tensor * ggml_norm_impl(
bool is_node = false;
if (!inplace && (a->grad)) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
return ggml_norm_impl(ctx, a, true);
}
+struct ggml_tensor * ggml_rms_norm_impl(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ bool inplace) {
+ bool is_node = false;
+
+ if (!inplace && (a->grad)) {
+ GGML_ASSERT(false); // TODO: implement backward
+ is_node = true;
+ }
+
+ struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
+
+ result->op = GGML_OP_RMS_NORM;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src0 = a;
+ result->src1 = NULL; // TODO: maybe store epsilon here?
+
+ return result;
+}
+
+struct ggml_tensor * ggml_rms_norm(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_rms_norm_impl(ctx, a, false);
+}
+
+struct ggml_tensor * ggml_rms_norm_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_rms_norm_impl(ctx, a, true);
+}
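+
+// for reference, rms_norm scales each row by the root mean square of its
+// elements and, unlike ggml_norm, does not subtract the mean (the RMSNorm
+// variant used by LLaMA):
+//
+//   y_i = x_i / sqrt(mean(x_j^2) + eps)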
+
// ggml_mul_mat
struct ggml_tensor * ggml_mul_mat(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b) {
- assert(ggml_can_mul_mat(a, b));
+ GGML_ASSERT(ggml_can_mul_mat(a, b));
+ GGML_ASSERT(!ggml_is_transposed(a));
bool is_node = false;
struct ggml_tensor * a,
struct ggml_tensor * b,
bool inplace) {
- assert(ggml_is_scalar(b));
- assert(ggml_is_padded_1d(a));
+ GGML_ASSERT(ggml_is_scalar(b));
+ GGML_ASSERT(ggml_is_padded_1d(a));
bool is_node = false;
if (!inplace && (a->grad || b->grad)) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
struct ggml_tensor * a,
struct ggml_tensor * b,
bool inplace) {
- assert(ggml_nelements(a) == ggml_nelements(b));
+ GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
bool is_node = false;
if (!inplace && (a->grad || b->grad)) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b) {
- assert(ggml_is_contiguous(a));
- assert(ggml_is_contiguous(b));
- assert(ggml_nelements(a) == ggml_nelements(b));
+ GGML_ASSERT(ggml_is_contiguous(a));
+ GGML_ASSERT(ggml_is_contiguous(b));
+ GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
bool is_node = false;
if (a->grad || b->grad) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
struct ggml_tensor * a,
int ne0,
int ne1) {
- assert(ggml_is_contiguous(a));
- assert(ggml_nelements(a) == ne0*ne1);
+ GGML_ASSERT(ggml_is_contiguous(a));
+ GGML_ASSERT(ggml_nelements(a) == ne0*ne1);
bool is_node = false;
if (a->grad) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
int ne0,
int ne1,
int ne2) {
- assert(ggml_is_contiguous(a));
- assert(ggml_nelements(a) == ne0*ne1*ne2);
+ GGML_ASSERT(ggml_is_contiguous(a));
+ GGML_ASSERT(ggml_nelements(a) == ne0*ne1*ne2);
bool is_node = false;
if (a->grad) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
int ne0,
size_t offset) {
if (a->grad) {
- assert(false); // gradient propagation is not supported
+ GGML_ASSERT(false); // gradient propagation is not supported
}
struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset);
size_t nb1,
size_t offset) {
if (a->grad) {
- assert(false); // gradient propagation is not supported
+ GGML_ASSERT(false); // gradient propagation is not supported
}
const int ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 };
int axis1,
int axis2,
int axis3) {
- assert(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
- assert(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
- assert(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
- assert(axis3 >= 0 && axis3 < GGML_MAX_DIMS);
-
- assert(axis0 != axis1);
- assert(axis0 != axis2);
- assert(axis0 != axis3);
- assert(axis1 != axis2);
- assert(axis1 != axis3);
- assert(axis2 != axis3);
+ GGML_ASSERT(axis0 >= 0 && axis0 < GGML_MAX_DIMS);
+ GGML_ASSERT(axis1 >= 0 && axis1 < GGML_MAX_DIMS);
+ GGML_ASSERT(axis2 >= 0 && axis2 < GGML_MAX_DIMS);
+ GGML_ASSERT(axis3 >= 0 && axis3 < GGML_MAX_DIMS);
+
+ GGML_ASSERT(axis0 != axis1);
+ GGML_ASSERT(axis0 != axis2);
+ GGML_ASSERT(axis0 != axis3);
+ GGML_ASSERT(axis1 != axis2);
+ GGML_ASSERT(axis1 != axis3);
+ GGML_ASSERT(axis2 != axis3);
bool is_node = false;
if (a->grad) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
bool is_node = false;
if (a->grad) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b) {
- assert(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
+ GGML_ASSERT(ggml_is_matrix(a) && ggml_is_vector(b) && b->type == GGML_TYPE_I32);
bool is_node = false;
if (a->grad || b->grad) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
bool is_node = false;
if (a->grad) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
bool is_node = false;
if (a->grad) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
int n_past,
int n_dims,
int mode) {
- assert(n_past >= 0);
+ GGML_ASSERT(n_past >= 0);
bool is_node = false;
if (a->grad) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b) {
- assert(ggml_is_matrix(b));
- assert(a->ne[1] == b->ne[1]);
- assert(a->ne[3] == 1);
+ GGML_ASSERT(ggml_is_matrix(b));
+ GGML_ASSERT(a->ne[1] == b->ne[1]);
+ GGML_ASSERT(a->ne[3] == 1);
bool is_node = false;
if (a->grad || b->grad) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b) {
- assert(ggml_is_matrix(b));
- assert(a->ne[1] == b->ne[1]);
- assert(a->ne[3] == 1);
+ GGML_ASSERT(ggml_is_matrix(b));
+ GGML_ASSERT(a->ne[1] == b->ne[1]);
+ GGML_ASSERT(a->ne[3] == 1);
bool is_node = false;
if (a->grad || b->grad) {
- assert(false); // TODO: implement backward
+ GGML_ASSERT(false); // TODO: implement backward
is_node = true;
}
struct ggml_tensor * k,
struct ggml_tensor * v,
bool masked) {
- assert(ggml_can_mul_mat(k, q));
+ GGML_ASSERT(ggml_can_mul_mat(k, q));
// TODO: check if vT can be multiplied by (k*qT)
bool is_node = false;
struct ggml_tensor * b1,
struct ggml_tensor * c0,
struct ggml_tensor * c1) {
- assert(ggml_can_mul_mat(b0, a));
+ GGML_ASSERT(ggml_can_mul_mat(b0, a));
// TODO: more checks
bool is_node = false;
struct ggml_tensor * tensor) {
tensor->is_param = true;
- assert(tensor->grad == NULL);
+ GGML_ASSERT(tensor->grad == NULL);
tensor->grad = ggml_dup_tensor(ctx, tensor);
}
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
struct ggml_tensor * dst) {
- assert(params->ith == 0);
- assert(ggml_is_contiguous(dst));
- assert(ggml_nelements(dst) == ggml_nelements(src0));
+ GGML_ASSERT(params->ith == 0);
+ GGML_ASSERT(ggml_is_contiguous(dst));
+ GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
if (src0->nb[0] == sizeof(ggml_fp16_t)) {
if (dst->type == GGML_TYPE_F16) {
- int id = 0;
+ size_t id = 0;
const size_t rs = ne00*nb00;
for (int i03 = 0; i03 < ne03; i03++) {
}
}
} else if (dst->type == GGML_TYPE_F32) {
- int id = 0;
+ size_t id = 0;
float * dst_ptr = (float *) dst->data;
for (int i03 = 0; i03 < ne03; i03++) {
//printf("%s: this is not optimal - fix me\n", __func__);
if (dst->type == GGML_TYPE_F32) {
- int id = 0;
+ size_t id = 0;
float * dst_ptr = (float *) dst->data;
for (int i03 = 0; i03 < ne03; i03++) {
}
}
} else if (dst->type == GGML_TYPE_F16) {
- int id = 0;
+ size_t id = 0;
ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
for (int i03 = 0; i03 < ne03; i03++) {
if (src0->nb[0] == sizeof(float)) {
if (dst->type == GGML_TYPE_F32) {
- int id = 0;
+ size_t id = 0;
const size_t rs = ne00*nb00;
for (int i03 = 0; i03 < ne03; i03++) {
}
}
} else if (dst->type == GGML_TYPE_F16) {
- int id = 0;
+ size_t id = 0;
ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
for (int i03 = 0; i03 < ne03; i03++) {
//printf("%s: this is not optimal - fix me\n", __func__);
if (dst->type == GGML_TYPE_F32) {
- int id = 0;
+ size_t id = 0;
float * dst_ptr = (float *) dst->data;
for (int i03 = 0; i03 < ne03; i03++) {
}
}
} else if (dst->type == GGML_TYPE_F16) {
- int id = 0;
+ size_t id = 0;
ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
for (int i03 = 0; i03 < ne03; i03++) {
{
ggml_compute_forward_dup_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
{
ggml_compute_forward_add_f32(params, src0, src1, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_sub_f32(params, src0, src1, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_mul_f32(params, src0, src1, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_div_f32(params, src0, src1, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_sqr_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_sqrt_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_sum_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_mean_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_repeat_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_abs_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_sgn_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_neg_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_step_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_relu_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_gelu_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_I8:
+ case GGML_TYPE_I16:
+ case GGML_TYPE_I32:
+ case GGML_TYPE_F16:
+ case GGML_TYPE_COUNT:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+
+ //printf("XXXXXXXX gelu\n");
+}
+
+// ggml_compute_forward_silu
+
+static void ggml_compute_forward_silu_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_is_contiguous(src0));
+ GGML_ASSERT(ggml_is_contiguous(dst));
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nc = src0->ne[0];
+ const int nr = ggml_nrows(src0);
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ for (int i1 = ir0; i1 < ir1; i1++) {
+ ggml_vec_silu_f32(nc,
+ (float *) ((char *) dst->data + i1*( dst->nb[1])),
+ (float *) ((char *) src0->data + i1*(src0->nb[1])));
+
+#ifndef NDEBUG
+ for (int k = 0; k < nc; k++) {
+ const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
+ UNUSED(x);
+ assert(!isnan(x));
+ assert(!isinf(x));
+ }
+#endif
+ }
+}
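+
+// For reference, SiLU (a.k.a. swish) is silu(x) = x*sigmoid(x). A minimal
+// scalar sketch of what ggml_vec_silu_f32 computes per element (illustrative
+// only; the actual kernel may be vectorized):
+#if 0
+static inline float silu_ref(const float x) {
+ return x/(1.0f + expf(-x)); // x * sigmoid(x)
+}
+#endif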
+
+static void ggml_compute_forward_silu(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_silu_f32(params, src0, dst);
+ } break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
+
// ggml_compute_forward_norm
static void ggml_compute_forward_norm_f32(
{
ggml_compute_forward_norm_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_I8:
+ case GGML_TYPE_I16:
+ case GGML_TYPE_I32:
+ case GGML_TYPE_F16:
+ case GGML_TYPE_COUNT:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
+static void ggml_compute_forward_rms_norm_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ GGML_ASSERT(src0->nb[0] == sizeof(float));
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int ne00 = src0->ne[0];
+ const int ne01 = src0->ne[1];
+ const int ne02 = src0->ne[2];
+ const int ne03 = src0->ne[3];
+
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ const ggml_float eps = 1e-6f; // TODO: make this a parameter
+
+ // TODO: optimize
+ for (int i03 = 0; i03 < ne03; i03++) {
+ for (int i02 = 0; i02 < ne02; i02++) {
+ for (int i01 = ith; i01 < ne01; i01 += nth) {
+ const float * x = (float *) ((char *) src0->data + i01*nb01 + i02*nb02 + i03*nb03);
+
+ ggml_float mean = 0.0;
+ for (int i00 = 0; i00 < ne00; i00++) {
+ mean += x[i00] * x[i00];
+ }
+
+ mean /= ne00;
+
+ float * y = (float *) ((char *) dst->data + i01*nb1 + i02*nb2 + i03*nb3);
+
+ memcpy(y, x, ne00 * sizeof(float));
+ // for (int i00 = 0; i00 < ne00; i00++) {
+ // y[i00] = x[i00];
+ // }
+
+ const float scale = 1.0/sqrt(mean + eps);
+
+ ggml_vec_scale_f32(ne00, y, scale);
+ }
+ }
+ }
+}
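+
+// In other words, each row is rescaled as
+//
+// y_i = x_i / sqrt((1/n) * sum_j x_j^2 + eps)
+//
+// note that unlike ggml_compute_forward_norm_f32, the mean is not subtracted;
+// only the root mean square of the row is normalized out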
+
+static void ggml_compute_forward_rms_norm(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_rms_norm_f32(params, src0, dst);
+ } break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
+
// ggml_compute_forward_mul_mat
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
- UNUSED(src0);
+ //const int ne00 = src0->ne[0];
+ //const int ne01 = src0->ne[1];
const int ne10 = src1->ne[0];
const int ne1 = dst->ne[1];
// TODO: find the optimal values for these
- if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && (
- (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)
- )) {
- //printf("BLAS: %d %d %d\n", ne0, ne1, ne10);
+ if (ggml_is_contiguous(src0) &&
+ ggml_is_contiguous(src1) && ((ne0 >= 32 && ne1 >= 32 && ne10 >= 32))) {
+
+ /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
return true;
}
const int ne1 = dst->ne[1];
const int ne2 = dst->ne[2];
const int ne3 = dst->ne[3];
- const int ne = ne0*ne1*ne2*ne3;
+ //const int ne = ne0*ne1*ne2*ne3;
const int nb00 = src0->nb[0];
const int nb01 = src0->nb[1];
assert(ne3 == ne13);
// TODO: we don't support permuted src0
- assert(nb00 == sizeof(float) || nb01 == sizeof(float));
+ assert(nb00 == sizeof(float));
// dst cannot be transposed or permuted
assert(nb0 == sizeof(float));
// nb01 >= nb00 - src0 is not transposed
// compute by src0 rows
- //
- // nb00 < nb01 - src0 is transposed
- // compute by src0 columns
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
for (int i03 = 0; i03 < ne03; i03++) {
for (int i02 = 0; i02 < ne02; i02++) {
- const float * x = (float *) (src0->data);
+ const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03);
const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
// zT = y * xT
- {
- cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
- ne11, ne01, ne10,
- 1.0f, y, ne10,
- x, ne10,
- 0.0f, d, ne01);
- }
+ cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
+ ne11, ne01, ne10,
+ 1.0f, y, ne10,
+ x, ne10,
+ 0.0f, d, ne01);
}
}
#endif
if (params->type == GGML_TASK_INIT) {
- if (nb01 >= nb00) {
- return;
- }
-
- // TODO: fix this memset (wsize is overestimated)
- memset(params->wdata, 0, params->wsize);
return;
}
if (params->type == GGML_TASK_FINALIZE) {
- if (nb01 >= nb00) {
- return;
- }
-
- // TODO: fix this memset (wsize is overestimated)
- //assert(params->wsize == (ggml_nbytes(dst) + CACHE_LINE_SIZE)*nth);
-
- float * const wdata = params->wdata;
-
- // cols per thread
- const int dc = (ne + nth - 1)/nth;
-
- // col range for this thread
- const int ic0 = dc*ith;
- const int ic1 = MIN(ic0 + dc, ne);
-
- ggml_vec_cpy_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + ic0);
-
- for (int k = 1; k < nth; k++) {
- ggml_vec_acc_f32(ic1 - ic0, (float *) dst->data + ic0, wdata + (ne + CACHE_LINE_SIZE_F32)*k + ic0);
- }
-
return;
}
- if (nb01 >= nb00) {
- // TODO: do not support transposed src1
- assert(nb10 == sizeof(float));
+ // TODO: transposed src1 is not supported
+ assert(nb10 == sizeof(float));
- // parallelize by src0 rows using ggml_vec_dot_f32
+ // parallelize by src0 rows using ggml_vec_dot_f32
- // total rows in src0
- const int nr = ne01*ne02*ne03;
+ // total rows in src0
+ const int nr = ne01*ne02*ne03;
- // rows per thread
- const int dr = (nr + nth - 1)/nth;
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
- // row range for this thread
- const int ir0 = dr*ith;
- const int ir1 = MIN(ir0 + dr, nr);
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
- for (int ir = ir0; ir < ir1; ++ir) {
- // src0 indices
- const int i03 = ir/(ne02*ne01);
- const int i02 = (ir - i03*ne02*ne01)/ne01;
- const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 indices
+ const int i03 = ir/(ne02*ne01);
+ const int i02 = (ir - i03*ne02*ne01)/ne01;
+ const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
- for (int ic = 0; ic < ne11; ++ic) {
- // src1 indices
- const int i13 = i03;
- const int i12 = i02;
- const int i11 = ic;
+ for (int ic = 0; ic < ne11; ++ic) {
+ // src1 indices
+ const int i13 = i03;
+ const int i12 = i02;
+ const int i11 = ic;
- // dst indices
- const int i0 = i01;
- const int i1 = i11;
- const int i2 = i02;
- const int i3 = i03;
-
- ggml_vec_dot_f32(ne00,
- (float *) ((char *) dst->data + (i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
- (float *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)),
- (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13)));
- }
+ // dst indices
+ const int i0 = i01;
+ const int i1 = i11;
+ const int i2 = i02;
+ const int i3 = i03;
+
+ ggml_vec_dot_f32(ne00,
+ (float *) ((char *) dst->data + (i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
+ (float *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)),
+ (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13)));
}
- } else {
- // parallelize by src1 columns using ggml_vec_mad_f32
- // each thread has its own work data
- // during FINALIZE we accumulate all work data into dst
+ }
- // total columns in src1
- const int nc = ne10;
+ //int64_t t1 = ggml_perf_time_us();
+ //static int64_t acc = 0;
+ //acc += t1 - t0;
+ //if (t1 - t0 > 10) {
+ // printf("\n");
+ // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
+ // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
+ // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
+ // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
- // columns per thread
- const int dc = (nc + nth - 1)/nth;
+ // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
+ //}
+}
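+
+// The row partitioning above is the pattern used throughout this file: with
+// nr total rows and nth threads, each thread takes dr = (nr + nth - 1)/nth
+// rows, and thread ith works on the half-open range [dr*ith, MIN(dr*ith + dr, nr)).
+// For example, nr = 100 and nth = 8 gives dr = 13, so thread 7 gets rows 91..99.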
- // column range for this thread
- const int ic0 = dc*ith;
- const int ic1 = MIN(ic0 + dc, nc);
+static void ggml_compute_forward_mul_mat_f16_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ int64_t t0 = ggml_perf_time_us();
+ UNUSED(t0);
+
+ const int ne00 = src0->ne[0];
+ const int ne01 = src0->ne[1];
+ const int ne02 = src0->ne[2];
+ const int ne03 = src0->ne[3];
+
+ const int ne10 = src1->ne[0];
+ const int ne11 = src1->ne[1];
+ const int ne12 = src1->ne[2];
+ const int ne13 = src1->ne[3];
+
+ const int ne0 = dst->ne[0];
+ const int ne1 = dst->ne[1];
+ const int ne2 = dst->ne[2];
+ const int ne3 = dst->ne[3];
+ //const int ne = ne0*ne1*ne2*ne3;
+
+ const int nb00 = src0->nb[0];
+ const int nb01 = src0->nb[1];
+ const int nb02 = src0->nb[2];
+ const int nb03 = src0->nb[3];
+
+ const int nb10 = src1->nb[0];
+ const int nb11 = src1->nb[1];
+ const int nb12 = src1->nb[2];
+ const int nb13 = src1->nb[3];
+
+ const int nb0 = dst->nb[0];
+ const int nb1 = dst->nb[1];
+ const int nb2 = dst->nb[2];
+ const int nb3 = dst->nb[3];
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ GGML_ASSERT(ne02 == ne12);
+ GGML_ASSERT(ne03 == ne13);
+ GGML_ASSERT(ne2 == ne12);
+ GGML_ASSERT(ne3 == ne13);
+
+ // TODO: we don't support permuted src0
+ GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+
+ // dst cannot be transposed or permuted
+ GGML_ASSERT(nb0 == sizeof(float));
+ GGML_ASSERT(nb0 <= nb1);
+ GGML_ASSERT(nb1 <= nb2);
+ GGML_ASSERT(nb2 <= nb3);
+
+ GGML_ASSERT(ne0 == ne01);
+ GGML_ASSERT(ne1 == ne11);
+ GGML_ASSERT(ne2 == ne02);
+ GGML_ASSERT(ne3 == ne03);
+
+ // nb01 >= nb00 - src0 is not transposed
+ // compute by src0 rows
+
+#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
+ if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
+ GGML_ASSERT(nb10 == sizeof(float));
+
+ if (params->ith != 0) {
+ return;
+ }
+
+ if (params->type == GGML_TASK_INIT) {
+ return;
+ }
+
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
- // work data for thread
- const int wo = (ne + CACHE_LINE_SIZE_F32)*ith;
float * const wdata = params->wdata;
+ for (int i03 = 0; i03 < ne03; i03++) {
+ for (int i02 = 0; i02 < ne02; i02++) {
+ {
+ size_t id = 0;
+ for (int i01 = 0; i01 < ne01; ++i01) {
+ for (int i00 = 0; i00 < ne00; ++i00) {
+ wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00));
+ }
+ }
+ }
+
+ const float * x = wdata;
+ const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
+
+ float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
+
+ // zT = y * xT
+ cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
+ ne11, ne01, ne10,
+ 1.0f, y, ne10,
+ x, ne10,
+ 0.0f, d, ne01);
+ }
+ }
+
+ /*printf("CBLAS F16 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);*/
+
+ return;
+ }
+#endif
+
+ if (params->type == GGML_TASK_INIT) {
+ ggml_fp16_t * const wdata = params->wdata;
+
+ size_t id = 0;
for (int i13 = 0; i13 < ne13; ++i13) {
for (int i12 = 0; i12 < ne12; ++i12) {
for (int i11 = 0; i11 < ne11; ++i11) {
- for (int ic = ic0; ic < ic1; ++ic) {
- // src1 indices
- const int i10 = ic;
-
- // src0 indices
- const int i03 = i13;
- const int i02 = i12;
- const int i00 = ic;
-
- // dst indices
- const int i1 = i11;
- const int i2 = i12;
- const int i3 = i13;
-
- assert(sizeof(float)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize);
-
- ggml_vec_mad_f32(ne01,
- (float *) (wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0),
- (float *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03)),
- *(float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13)));
+ for (int i10 = 0; i10 < ne10; ++i10) {
+ wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10));
}
}
}
}
+
+ GGML_ASSERT(id*sizeof(ggml_fp16_t) <= params->wsize);
+
+ return;
}
- //int64_t t1 = ggml_perf_time_us();
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // src1 is F32, so nb10 == sizeof(float); the fp16 work buffer packs the
+ // same data at half that stride
+ // TODO: transposed src1 is not supported
+ assert(nb10/2 == sizeof(ggml_fp16_t));
+
+ // parallelize by src0 rows using ggml_vec_dot_f16
+
+ // total rows in src0
+ const int nr = ne01*ne02*ne03;
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ ggml_fp16_t * wdata = params->wdata;
+
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 indices
+ const int i03 = ir/(ne02*ne01);
+ const int i02 = (ir - i03*ne02*ne01)/ne01;
+ const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
+
+ const int i13 = i03;
+ const int i12 = i02;
+
+ const int i0 = i01;
+ const int i2 = i02;
+ const int i3 = i03;
+
+ ggml_fp16_t * src0_row = (ggml_fp16_t *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
+ ggml_fp16_t * src1_col = wdata + ( 0 + i12*ne11 + i13*ne12*ne11)*ne00;
+
+ float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));
+
+ for (int ic = 0; ic < ne11; ++ic) {
+ ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00);
+ }
+ }
+
+ //int64_t t1 = ggml_time_us();
+ //static int64_t acc = 0;
+ //acc += t1 - t0;
+ //if (t1 - t0 > 10) {
+ // printf("\n");
+ // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
+ // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
+ // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
+
+ // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
+ //}
+}
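+
+// Note on the fp16 path: during GGML_TASK_INIT every F32 element of src1 is
+// converted once into the fp16 work buffer, so the main loop can run
+// ggml_vec_dot_f16 on two fp16 operands instead of mixing precisions; the
+// GGML_ASSERT on id*sizeof(ggml_fp16_t) <= params->wsize verifies the buffer
+// was sized for all ne10*ne11*ne12*ne13 converted values.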
+
+static void ggml_compute_forward_mul_mat_q4_0_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ int64_t t0 = ggml_perf_time_us();
+ UNUSED(t0);
+
+ const int ne00 = src0->ne[0];
+ const int ne01 = src0->ne[1];
+ const int ne02 = src0->ne[2];
+ const int ne03 = src0->ne[3];
+
+ const int ne10 = src1->ne[0];
+ const int ne11 = src1->ne[1];
+ const int ne12 = src1->ne[2];
+ const int ne13 = src1->ne[3];
+
+ const int ne0 = dst->ne[0];
+ const int ne1 = dst->ne[1];
+ const int ne2 = dst->ne[2];
+ const int ne3 = dst->ne[3];
+ //const int ne = ne0*ne1*ne2*ne3;
+
+ const int nb00 = src0->nb[0];
+ const int nb01 = src0->nb[1];
+ const int nb02 = src0->nb[2];
+ const int nb03 = src0->nb[3];
+
+ const int nb10 = src1->nb[0];
+ const int nb11 = src1->nb[1];
+ const int nb12 = src1->nb[2];
+ const int nb13 = src1->nb[3];
+
+ const int nb0 = dst->nb[0];
+ const int nb1 = dst->nb[1];
+ const int nb2 = dst->nb[2];
+ const int nb3 = dst->nb[3];
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ GGML_ASSERT(ne02 == ne12);
+ GGML_ASSERT(ne03 == ne13);
+ GGML_ASSERT(ne2 == ne12);
+ GGML_ASSERT(ne3 == ne13);
+
+ // TODO: we don't support permuted src0
+ GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[GGML_TYPE_Q4_0]);
+
+ // dst cannot be transposed or permuted
+ GGML_ASSERT(nb0 == sizeof(float));
+ GGML_ASSERT(nb0 <= nb1);
+ GGML_ASSERT(nb1 <= nb2);
+ GGML_ASSERT(nb2 <= nb3);
+
+ GGML_ASSERT(ne0 == ne01);
+ GGML_ASSERT(ne1 == ne11);
+ GGML_ASSERT(ne2 == ne02);
+ GGML_ASSERT(ne3 == ne03);
+
+ // nb01 >= nb00 - src0 is not transposed
+ // compute by src0 rows
+
+#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
+ if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
+ GGML_ASSERT(nb10 == sizeof(float));
+
+ if (params->ith != 0) {
+ return;
+ }
+
+ if (params->type == GGML_TASK_INIT) {
+ return;
+ }
+
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ float * const wdata = params->wdata;
+
+ for (int i03 = 0; i03 < ne03; i03++) {
+ for (int i02 = 0; i02 < ne02; i02++) {
+ {
+ size_t id = 0;
+ for (int i01 = 0; i01 < ne01; ++i01) {
+ dequantize_row_q4_0((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00);
+ id += ne00;
+ }
+ }
+
+ const float * x = wdata;
+ const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
+
+ float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
+
+ // zT = y * xT
+ cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
+ ne11, ne01, ne10,
+ 1.0f, y, ne10,
+ x, ne10,
+ 0.0f, d, ne01);
+ }
+ }
+
+ /*printf("CBLAS Q4_0 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);*/
+
+ return;
+ }
+#endif
+
+ if (params->type == GGML_TASK_INIT) {
+ char * wdata = params->wdata;
+
+ for (int i13 = 0; i13 < ne13; ++i13) {
+ for (int i12 = 0; i12 < ne12; ++i12) {
+ for (int i11 = 0; i11 < ne11; ++i11) {
+ quantize_row_q4_0((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
+ wdata += (ne10*GGML_TYPE_SIZE[GGML_TYPE_Q4_0])/GGML_BLCK_SIZE[GGML_TYPE_Q4_0];
+ }
+ }
+ }
+
+ return;
+ }
+
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // TODO: transposed src1 is not supported
+
+ // parallelize by src0 rows using ggml_vec_dot_q4_0
+
+ // total rows in src0
+ const int nr = ne01*ne02*ne03;
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ void * wdata = params->wdata;
+
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 indices
+ const int i03 = ir/(ne02*ne01);
+ const int i02 = (ir - i03*ne02*ne01)/ne01;
+ const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
+
+ const int i13 = i03;
+ const int i12 = i02;
+
+ const int i0 = i01;
+ const int i2 = i02;
+ const int i3 = i03;
+
+ void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
+ char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_0])/GGML_BLCK_SIZE[GGML_TYPE_Q4_0]);
+
+ float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));
+
+ assert(ne00 % 32 == 0);
+
+ for (int ic = 0; ic < ne11; ++ic) {
+ ggml_vec_dot_q4_0(ne00, &dst_col[ic*ne0], src0_row, ((void *) (src1_col + (ic*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_0])/GGML_BLCK_SIZE[GGML_TYPE_Q4_0])));
+ }
+ }
+
+ //int64_t t1 = ggml_time_us();
//static int64_t acc = 0;
//acc += t1 - t0;
//if (t1 - t0 > 10) {
// printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
// printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
// printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
- // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
// printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
//}
}
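+
+// Work-buffer layout for the quantized path: the INIT phase quantizes each
+// row of src1 into wdata, so a row of ne10 floats shrinks to
+// ne10*GGML_TYPE_SIZE[GGML_TYPE_Q4_0]/GGML_BLCK_SIZE[GGML_TYPE_Q4_0] bytes.
+// A hypothetical view of a single Q4_0 block, assuming a block size of 32 and
+// the one-float-scale layout implied by ggml_quantize_q4_0 further below:
+#if 0
+typedef struct {
+ float d; // scale factor for the 32 weights in the block
+ uint8_t qs[16]; // 32 x 4-bit quants, packed two per byte
+} q4_0_block_sketch;
+#endif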
-static void ggml_compute_forward_mul_mat_f16_f32(
+static void ggml_compute_forward_mul_mat_q4_1_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
const int ne1 = dst->ne[1];
const int ne2 = dst->ne[2];
const int ne3 = dst->ne[3];
- const int ne = ne0*ne1*ne2*ne3;
+ //const int ne = ne0*ne1*ne2*ne3;
const int nb00 = src0->nb[0];
const int nb01 = src0->nb[1];
GGML_ASSERT(ne3 == ne13);
// TODO: we don't support permuted src0
- GGML_ASSERT(nb00 == sizeof(ggml_fp16_t) || nb01 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb00 == (int) GGML_TYPE_SIZE[GGML_TYPE_Q4_1]);
// dst cannot be transposed or permuted
GGML_ASSERT(nb0 == sizeof(float));
// nb01 >= nb00 - src0 is not transposed
// compute by src0 rows
- //
- // nb00 < nb01 - src0 is transposed
- // compute by src0 columns
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
for (int i03 = 0; i03 < ne03; i03++) {
for (int i02 = 0; i02 < ne02; i02++) {
{
- int id = 0;
+ size_t id = 0;
for (int i01 = 0; i01 < ne01; ++i01) {
- for (int i00 = 0; i00 < ne00; ++i00) {
- wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00));
- }
+ dequantize_row_q4_1((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00);
+ id += ne00;
}
}
const float * x = wdata;
const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
- // float * z = wdata + ne00*ne01;
-
- // z = x * yT
- //{
- // cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
- // ne01, ne11, ne00,
- // 1.0f, x, ne00,
- // y, ne00,
- // 0.0f, z, ne11);
- //}
-
float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
- // transpose z
- //for (int j = 0; j < ne11; ++j) {
- // for (int i = 0; i < ne01; ++i) {
- // d[j*ne01 + i] = z[i*ne11 + j];
- // }
- //}
-
- {
-#if 1
- // zT = y * xT
- cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
- ne11, ne01, ne10,
- 1.0f, y, ne00,
- x, ne00,
- 0.0f, d, ne01);
-#else
- // zT = (xT * y)T
- cblas_sgemm(CblasColMajor, CblasTrans, CblasNoTrans,
- ne01, ne11, ne10,
- 1.0f, x, ne00,
- y, ne00,
- 0.0f, d, ne01);
-#endif
- }
+ // zT = y * xT
+ cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
+ ne11, ne01, ne10,
+ 1.0f, y, ne10,
+ x, ne10,
+ 0.0f, d, ne01);
}
}
#endif
if (params->type == GGML_TASK_INIT) {
- if (nb01 >= nb00) {
- ggml_fp16_t * const wdata = params->wdata;
-
- int id = 0;
- for (int i13 = 0; i13 < ne13; ++i13) {
- for (int i12 = 0; i12 < ne12; ++i12) {
- for (int i11 = 0; i11 < ne11; ++i11) {
- for (int i10 = 0; i10 < ne10; ++i10) {
- wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10));
- }
- }
+ char * wdata = params->wdata;
+
+ for (int i13 = 0; i13 < ne13; ++i13) {
+ for (int i12 = 0; i12 < ne12; ++i12) {
+ for (int i11 = 0; i11 < ne11; ++i11) {
+ //for (int i10 = 0; i10 < ne10; ++i10) {
+ // wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10));
+ //}
+ quantize_row_q4_1((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
+ wdata += (ne10*GGML_TYPE_SIZE[GGML_TYPE_Q4_1])/GGML_BLCK_SIZE[GGML_TYPE_Q4_1];
}
}
-
- GGML_ASSERT(id*sizeof(ggml_fp16_t) <= params->wsize);
-
- return;
}
- // TODO: fix this memset (wsize is overestimated)
- memset(params->wdata, 0, params->wsize);
return;
}
if (params->type == GGML_TASK_FINALIZE) {
- if (nb01 >= nb00) {
- return;
- }
-
- // TODO: fix this memset (wsize is overestimated)
- //assert(params->wsize == (ggml_nbytes(dst) + CACHE_LINE_SIZE)*nth);
-
- ggml_fp16_t * const wdata = params->wdata;
-
- // cols per thread
- const int dc = (ne + nth - 1)/nth;
-
- // col range for this thread
- const int ic0 = dc*ith;
- const int ic1 = MIN(ic0 + dc, ne);
-
- for (int i = ic0; i < ic1; ++i) {
- ((float *) dst->data)[i] = GGML_FP16_TO_FP32(wdata[i]);
- }
-
- for (int k = 1; k < nth; k++) {
- for (int i = ic0; i < ic1; ++i) {
- ((float *) dst->data)[i] += GGML_FP16_TO_FP32(wdata[(ne + CACHE_LINE_SIZE_F32)*k + i]);
- }
- }
-
return;
}
- if (nb01 >= nb00) {
- // fp16 -> half the size, so divide by 2
- // TODO: do not support transposed src1
- assert(nb10/2 == sizeof(ggml_fp16_t));
-
- // parallelize by src0 rows using ggml_vec_dot_f16
-
- // total rows in src0
- const int nr = ne01*ne02*ne03;
-
- // rows per thread
- const int dr = (nr + nth - 1)/nth;
-
- // row range for this thread
- const int ir0 = dr*ith;
- const int ir1 = MIN(ir0 + dr, nr);
-
- ggml_fp16_t * wdata = params->wdata;
-
- for (int ir = ir0; ir < ir1; ++ir) {
- // src0 indices
- const int i03 = ir/(ne02*ne01);
- const int i02 = (ir - i03*ne02*ne01)/ne01;
- const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
-
- const int i13 = i03;
- const int i12 = i02;
-
- const int i0 = i01;
- const int i2 = i02;
- const int i3 = i03;
-
- ggml_fp16_t * src0_row = (ggml_fp16_t *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
- ggml_fp16_t * src1_col = wdata + ( 0 + i12*ne11 + i13*ne12*ne11)*ne00;
-
- float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));
+ // TODO: transposed src1 is not supported
- assert(ne00 % 32 == 0);
-
- for (int ic = 0; ic < ne11; ++ic) {
- ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00);
- }
- }
- } else {
- // parallelize by src1 columns using ggml_vec_mad_f16
- // each thread has its own work data
- // during FINALIZE we accumulate all work data into dst
+ // parallelize by src0 rows using ggml_vec_dot_q4_1
- // total columns in src1
- const int nc = ne10;
+ // total rows in src0
+ const int nr = ne01*ne02*ne03;
- // columns per thread
- const int dc = (nc + nth - 1)/nth;
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
- // column range for this thread
- const int ic0 = dc*ith;
- const int ic1 = MIN(ic0 + dc, nc);
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
- // work data for thread
- const int wo = (ne + CACHE_LINE_SIZE_F32)*ith;
- ggml_fp16_t * const wdata = params->wdata;
+ void * wdata = params->wdata;
- for (int i13 = 0; i13 < ne13; ++i13) {
- for (int i12 = 0; i12 < ne12; ++i12) {
- for (int i11 = 0; i11 < ne11; ++i11) {
- // dst indices
- const int i1 = i11;
- const int i2 = i12;
- const int i3 = i13;
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0 indices
+ const int i03 = ir/(ne02*ne01);
+ const int i02 = (ir - i03*ne02*ne01)/ne01;
+ const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
- ggml_fp16_t * dst_row = wdata + wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0;
+ const int i13 = i03;
+ const int i12 = i02;
- for (int ic = ic0; ic < ic1; ++ic) {
- // src1 indices
- const int i10 = ic;
+ const int i0 = i01;
+ const int i2 = i02;
+ const int i3 = i03;
- // src0 indices
- const int i03 = i13;
- const int i02 = i12;
- const int i00 = ic;
+ void * src0_row = (void *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
+ char * src1_col = ((char *) wdata + ( (0 + i12*ne11 + i13*ne12*ne11)*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_1])/GGML_BLCK_SIZE[GGML_TYPE_Q4_1]);
- assert(sizeof(ggml_fp16_t)*(wo + i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + ne01) <= params->wsize);
+ float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));
- ggml_fp16_t * src0_col = (ggml_fp16_t *) ((char *) src0->data + (i00*nb00 + i02*nb02 + i03*nb03));
- float src1_val = * (float *) ((char *) src1->data + (i10*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
+ assert(ne00 % 32 == 0);
- ggml_vec_mad_f16(ne01, dst_row, src0_col, src1_val);
- }
- }
- }
+ for (int ic = 0; ic < ne11; ++ic) {
+ ggml_vec_dot_q4_1(ne00, &dst_col[ic*ne0], src0_row, ((void *) (src1_col + (ic*ne00*GGML_TYPE_SIZE[GGML_TYPE_Q4_1])/GGML_BLCK_SIZE[GGML_TYPE_Q4_1])));
}
}
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
switch (src0->type) {
+ case GGML_TYPE_Q4_0:
+ {
+ ggml_compute_forward_mul_mat_q4_0_f32(params, src0, src1, dst);
+ } break;
+ case GGML_TYPE_Q4_1:
+ {
+ ggml_compute_forward_mul_mat_q4_1_f32(params, src0, src1, dst);
+ } break;
case GGML_TYPE_F16:
{
ggml_compute_forward_mul_mat_f16_f32(params, src0, src1, dst);
case GGML_TYPE_I32:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
+
+#if 0
+ if (src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_Q4_1) {
+ static int first = 8;
+ printf("src0: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src0->ne[0], src0->ne[1], src0->ne[2]);
+ printf("src1: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src1->ne[0], src1->ne[1], src1->ne[2]);
+ printf("dst: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
+ if (first) {
+ --first;
+ } else {
+ for (int k = 0; k < dst->ne[1]; ++k) {
+ for (int j = 0; j < dst->ne[0]/16; ++j) {
+ for (int i = 0; i < 16; ++i) {
+ printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
+ }
+ printf("\n");
+ }
+ printf("\n");
+ }
+ printf("\n");
+ exit(0);
+ }
+ } else {
+ printf("aaaa src0: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src0->ne[0], src0->ne[1], src0->ne[2]);
+ printf("aaaa src1: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", src1->ne[0], src1->ne[1], src1->ne[2]);
+ printf("aaaa dst: ne0 = %5d, ne1 = %5d, ne2 = %5d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
+ }
+#endif
}
// ggml_compute_forward_scale
{
ggml_compute_forward_scale_f32(params, src0, src1, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
// ggml_compute_forward_get_rows
+static void ggml_compute_forward_get_rows_q4_0(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ assert(params->ith == 0);
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int nc = src0->ne[0];
+ const int nr = ggml_nelements(src1);
+
+ assert( dst->ne[0] == nc);
+ assert( dst->ne[1] == nr);
+ assert(src0->nb[0] == GGML_TYPE_SIZE[GGML_TYPE_Q4_0]);
+
+ for (int i = 0; i < nr; ++i) {
+ const int r = ((int32_t *) src1->data)[i];
+
+ dequantize_row_q4_0(
+ (const void *) ((char *) src0->data + r*src0->nb[1]),
+ (float *) ((char *) dst->data + i*dst->nb[1]), nc);
+ }
+}
+
+static void ggml_compute_forward_get_rows_q4_1(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ assert(params->ith == 0);
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int nc = src0->ne[0];
+ const int nr = ggml_nelements(src1);
+
+ assert( dst->ne[0] == nc);
+ assert( dst->ne[1] == nr);
+ assert(src0->nb[0] == GGML_TYPE_SIZE[GGML_TYPE_Q4_1]);
+
+ for (int i = 0; i < nr; ++i) {
+ const int r = ((int32_t *) src1->data)[i];
+
+ dequantize_row_q4_1(
+ (const void *) ((char *) src0->data + r*src0->nb[1]),
+ (float *) ((char *) dst->data + i*dst->nb[1]), nc);
+ }
+}
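+
+// get_rows is the embedding-lookup primitive: src1 holds int32 row indices
+// (e.g. token ids), and each selected row of the quantized src0 matrix is
+// dequantized into the corresponding float row of dst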
+
static void ggml_compute_forward_get_rows_f16(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
switch (src0->type) {
+ case GGML_TYPE_Q4_0:
+ {
+ ggml_compute_forward_get_rows_q4_0(params, src0, src1, dst);
+ } break;
+ case GGML_TYPE_Q4_1:
+ {
+ ggml_compute_forward_get_rows_q4_1(params, src0, src1, dst);
+ } break;
case GGML_TYPE_F16:
{
ggml_compute_forward_get_rows_f16(params, src0, src1, dst);
case GGML_TYPE_I32:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
+
+ //static bool first = true;
+ //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
+ //if (first) {
+ // first = false;
+ //} else {
+ // for (int k = 0; k < dst->ne[1]; ++k) {
+ // for (int j = 0; j < dst->ne[0]/16; ++j) {
+ // for (int i = 0; i < 16; ++i) {
+ // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
+ // }
+ // printf("\n");
+ // }
+ // printf("\n");
+ // }
+ // printf("\n");
+ // exit(0);
+ //}
}
// ggml_compute_forward_diag_mask_inf
{
ggml_compute_forward_diag_mask_inf_f32(params, src0, src1, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
#ifndef NDEBUG
for (int i = 0; i < nc; ++i) {
+ //printf("p[%d] = %f\n", i, p[i]);
assert(!isnan(p[i]));
}
#endif
{
ggml_compute_forward_soft_max_f32(params, src0, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
}
}
+static void ggml_compute_forward_rope_f16(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ assert(params->ith == 0);
+ assert(src1->type == GGML_TYPE_I32);
+ assert(ggml_nelements(src1) == 3);
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int n_past = ((int32_t *) src1->data)[0];
+ const int n_dims = ((int32_t *) src1->data)[1];
+ const int mode = ((int32_t *) src1->data)[2];
+
+ //const int ne0 = src0->ne[0];
+ const int ne1 = src0->ne[1];
+ const int ne2 = src0->ne[2];
+ const int ne3 = src0->ne[3];
+
+ const int nb0 = src0->nb[0];
+ const int nb1 = src0->nb[1];
+ const int nb2 = src0->nb[2];
+ const int nb3 = src0->nb[3];
+
+ //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
+ //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
+
+ assert(nb0 == sizeof(ggml_fp16_t));
+
+ for (int i3 = 0; i3 < ne3; i3++) {
+ for (int i2 = (mode == 0 ? 0 : n_past); i2 < ne2; i2++) {
+ const int p = (mode == 0 ? n_past + i2 : i2);
+ for (int i1 = 0; i1 < ne1; i1++) {
+ for (int i0 = 0; i0 < n_dims; i0 += 2) {
+ const double theta = pow(10000.0, ((double)-i0)/n_dims);
+
+ const double cos_theta = cos(p*theta);
+ const double sin_theta = sin(p*theta);
+
+ const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+ ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+ double x0 = ggml_fp16_to_fp32(src[0]);
+ double x1 = ggml_fp16_to_fp32(src[1]);
+
+ dst_data[0] = ggml_fp32_to_fp16(x0*cos_theta - x1*sin_theta);
+ dst_data[1] = ggml_fp32_to_fp16(x0*sin_theta + x1*cos_theta);
+ }
+ }
+ }
+ }
+}
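+
+// Rotary embedding in a nutshell: each even/odd feature pair (x0, x1) at
+// offset i0 is rotated by the angle p*theta, with theta = 10000^(-i0/n_dims)
+// and p the absolute token position:
+//
+// x0' = x0*cos(p*theta) - x1*sin(p*theta)
+// x1' = x0*sin(p*theta) + x1*cos(p*theta)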
+
static void ggml_compute_forward_rope(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
switch (src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_rope_f16(params, src0, src1, dst);
+ } break;
case GGML_TYPE_F32:
{
ggml_compute_forward_rope_f32(params, src0, src1, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
- case GGML_TYPE_F16:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
ggml_compute_forward_conv_1d_1s_f32(params, src0, src1, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
{
ggml_compute_forward_conv_1d_2s_f32(params, src0, src1, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
{
ggml_compute_forward_flash_attn_f32(params, q, k, v, masked, dst);
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
{
GGML_ASSERT(false); // TODO
} break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
case GGML_TYPE_I32:
case GGML_TYPE_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
/////////////////////////////////
static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
- assert(params);
+ GGML_ASSERT(params);
switch (tensor->op) {
case GGML_OP_DUP:
{
ggml_compute_forward_gelu(params, tensor->src0, tensor);
} break;
+ case GGML_OP_SILU:
+ {
+ ggml_compute_forward_silu(params, tensor->src0, tensor);
+ } break;
case GGML_OP_NORM:
{
ggml_compute_forward_norm(params, tensor->src0, tensor);
} break;
+ case GGML_OP_RMS_NORM:
+ {
+ ggml_compute_forward_rms_norm(params, tensor->src0, tensor);
+ } break;
case GGML_OP_MUL_MAT:
{
ggml_compute_forward_mul_mat(params, tensor->src0, tensor->src1, tensor);
} break;
case GGML_OP_MEAN:
{
- assert(false); // TODO: implement
+ GGML_ASSERT(false); // TODO: implement
} break;
case GGML_OP_REPEAT:
{
} break;
case GGML_OP_GELU:
{
- assert(false); // TODO: not implemented
+ GGML_ASSERT(false); // TODO: not implemented
+ } break;
+ case GGML_OP_SILU:
+ {
+ GGML_ASSERT(false); // TODO: not implemented
} break;
case GGML_OP_NORM:
{
- assert(false); // TODO: not implemented
+ GGML_ASSERT(false); // TODO: not implemented
+ } break;
+ case GGML_OP_RMS_NORM:
+ {
+ GGML_ASSERT(false); // TODO: not implemented
} break;
case GGML_OP_MUL_MAT:
{
if (src0->grad) {
// TODO: this requires outer product - ggml_out_prod(ctx, src1, tensor->grad);
- assert(false);
+ GGML_ASSERT(false);
}
if (src1->grad) {
src1->grad =
if (node->op == GGML_OP_NONE && node->grad == NULL) {
// reached a leaf node, not part of the gradient graph (e.g. a constant)
- assert(cgraph->n_leafs < GGML_MAX_NODES);
+ GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES);
cgraph->leafs[cgraph->n_leafs] = node;
cgraph->n_leafs++;
} else {
- assert(cgraph->n_nodes < GGML_MAX_NODES);
+ GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES);
cgraph->nodes[cgraph->n_nodes] = node;
cgraph->grads[cgraph->n_nodes] = node->grad;
if (n_new > 0) {
// the last added node should always be starting point
- assert(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
+ GGML_ASSERT(cgraph->nodes[cgraph->n_nodes - 1] == tensor);
}
}
struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) {
struct ggml_cgraph result = *gf;
- assert(gf->n_nodes > 0);
+ GGML_ASSERT(gf->n_nodes > 0);
// if we are keeping the gradient graph, we have to detach the gradient nodes from the original graph
if (keep) {
}
void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
- if (cgraph->n_threads <= 0) {
- cgraph->n_threads = 8;
- }
-
const int n_threads = cgraph->n_threads;
struct ggml_compute_state_shared state_shared = {
};
int rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_thread, &workers[j]);
- assert(rc == 0);
+ GGML_ASSERT(rc == 0);
UNUSED(rc);
}
}
{
node->n_tasks = n_threads;
} break;
+ case GGML_OP_SILU:
+ {
+ node->n_tasks = n_threads;
+ } break;
case GGML_OP_NORM:
+ case GGML_OP_RMS_NORM:
{
node->n_tasks = n_threads;
} break;
size_t cur = 0;
- // TODO: better way to determine if the matrix is transposed
- if (node->src0->nb[1] < node->src0->nb[0]) {
- cur = ggml_nbytes(node)*node->n_tasks; // TODO: this can become (n_tasks-1)
- } else {
- if (node->src0->type == GGML_TYPE_F16 &&
+ if (node->src0->type == GGML_TYPE_F16 &&
+ node->src1->type == GGML_TYPE_F32) {
+#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
+ if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
+ node->n_tasks = 1; // TODO: setting n_tasks = 1 does not actually help;
+ // the remaining threads keep spinning
+ cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
+ //printf("src0: ne0 = %d, ne1 = %d, ne = %d\n", node->src0->ne[0], node->src0->ne[1], node->src0->ne[0]*node->src0->ne[1]);
+ //printf("src1: ne0 = %d, ne1 = %d, ne = %d\n", node->src1->ne[0], node->src1->ne[1], node->src1->ne[0]*node->src1->ne[1]);
+ //printf("cur = %zu\n", cur);
+ } else {
+ cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1);
+ }
+#else
+ cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1);
+#endif
+ } else if (node->src0->type == GGML_TYPE_F32 &&
+ node->src1->type == GGML_TYPE_F32) {
+ cur = 0;
+ } else if (node->src0->type == GGML_TYPE_Q4_0 &&
node->src1->type == GGML_TYPE_F32) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
- if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
- node->n_tasks = 1; // TODO: this actually is doing nothing
- // the threads are still spinning
- cur = sizeof(float)*(node->src0->ne[0]*node->src0->ne[1]);
- //printf("src0: ne0 = %d, ne1 = %d, ne = %d\n", node->src0->ne[0], node->src0->ne[1], node->src0->ne[0]*node->src0->ne[1]);
- //printf("src1: ne0 = %d, ne1 = %d, ne = %d\n", node->src1->ne[0], node->src1->ne[1], node->src1->ne[0]*node->src1->ne[1]);
- //printf("cur = %zu\n", cur);
- } else {
- cur = sizeof(ggml_fp16_t)*ggml_nelements(node->src1);
- }
+ if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
+ node->n_tasks = 1;
+ cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
+ } else {
+ cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_0]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_0];
+ }
#else
- cur = sizeof(ggml_fp16_t)*ggml_nelements(node->src1);
+ cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_0]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_0];
#endif
- } else if (node->src0->type == GGML_TYPE_F32 &&
- node->src1->type == GGML_TYPE_F32) {
- cur = 0;
+ } else if (node->src0->type == GGML_TYPE_Q4_1 &&
+ node->src1->type == GGML_TYPE_F32) {
+#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
+ if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
+ node->n_tasks = 1;
+ cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
} else {
- GGML_ASSERT(false);
+ cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_1]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_1];
}
+#else
+ cur = (GGML_TYPE_SIZE[GGML_TYPE_Q4_1]*ggml_nelements(node->src1))/GGML_BLCK_SIZE[GGML_TYPE_Q4_1];
+#endif
+ } else {
+ GGML_ASSERT(false);
}
work_size = MAX(work_size, cur);
} break;
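+ // in each mul_mat branch above, cur is the scratch the INIT phase
+ // needs: a dequantized F32 copy of src0 on the BLAS path, or a
+ // converted/quantized copy of src1 on the vec_dot path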
case GGML_OP_COUNT:
{
- assert(false);
+ GGML_ASSERT(false);
} break;
}
}
if (cgraph->work != NULL && work_size > cgraph->work_size) {
- assert(false); // TODO: better handling
+ GGML_ASSERT(false); // TODO: better handling
}
if (work_size > 0 && cgraph->work == NULL) {
for (int j = 0; j < n_threads - 1; j++) {
int rc = ggml_thread_join(workers[j].thrd, NULL);
- assert(rc == 0);
+ GGML_ASSERT(rc == 0);
UNUSED(rc);
}
char color[16];
FILE * fp = fopen(filename, "w");
- assert(fp);
+ GGML_ASSERT(fp);
fprintf(fp, "digraph G {\n");
fprintf(fp, " newrank = true;\n");
struct ggml_tensor * f,
struct ggml_cgraph * gf,
struct ggml_cgraph * gb) {
- assert(ggml_is_scalar(f));
+ GGML_ASSERT(ggml_is_scalar(f));
gf->n_threads = params.n_threads;
gb->n_threads = params.n_threads;
if (gf->nodes[i]->is_param) {
GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
- assert(np < GGML_MAX_PARAMS);
+ GGML_ASSERT(np < GGML_MAX_PARAMS);
ps[np++] = gf->nodes[i];
nx += ggml_nelements(gf->nodes[i]);
if (gf->nodes[i]->is_param) {
GGML_PRINT_DEBUG("found param %d: grad->op = %d\n", np, gf->nodes[i]->grad->op);
- assert(np < GGML_MAX_PARAMS);
+ GGML_ASSERT(np < GGML_MAX_PARAMS);
ps[np++] = gf->nodes[i];
nx += ggml_nelements(gf->nodes[i]);
////////////////////////////////////////////////////////////////////////////////
+size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int qk, int64_t * hist) {
+ const int nb = k / qk;
+ const size_t bs = (sizeof(float) + sizeof(uint8_t)*qk/2);
+ const size_t row_size = nb*bs;
+
+ assert(k % qk == 0);
+
+ char * pdst = (char *) dst;
+
+ for (int j = 0; j < n; j += k) {
+ uint8_t * pd = (uint8_t *) (pdst + (j/k)*row_size + 0*bs);
+ uint8_t * pb = (uint8_t *) (pdst + (j/k)*row_size + 0*bs + sizeof(float));
+
+ quantize_row_q4_0_reference(src + j, pd, k);
+
+ for (int i = 0; i < nb; i++) {
+ for (int l = 0; l < qk; l += 2) {
+ const uint8_t vi0 = pb[l/2] & 0xF;
+ const uint8_t vi1 = pb[l/2] >> 4;
+
+ hist[vi0]++;
+ hist[vi1]++;
+ }
+ pb += bs;
+ }
+ }
+
+ return (n/k)*row_size;
+}
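+
+// A minimal usage sketch (hypothetical buffer names), quantizing n floats
+// laid out as rows of length k with qk values per block, while accumulating
+// a histogram over the 16 possible 4-bit quant values:
+#if 0
+int64_t hist[16] = {0};
+const size_t bytes_written = ggml_quantize_q4_0(src_f32, dst_buf, n, k, 32, hist);
+#endif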
+
+size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int qk, int64_t * hist) {
+ const int nb = k / qk;
+ const size_t bs = (2*sizeof(float) + sizeof(uint8_t)*qk/2);
+ const size_t row_size = nb*bs;
+
+ assert(k % qk == 0);
+
+ char * pdst = (char *) dst;
+
+ for (int j = 0; j < n; j += k) {
+ uint8_t * pd = (uint8_t *) (pdst + (j/k)*row_size + 0*bs);
+ uint8_t * pb = (uint8_t *) (pdst + (j/k)*row_size + 0*bs + 2*sizeof(float));
+
+ quantize_row_q4_1(src + j, pd, k);
+
+ for (int i = 0; i < nb; i++) {
+ for (int l = 0; l < qk; l += 2) {
+ const uint8_t vi0 = pb[l/2] & 0xF;
+ const uint8_t vi1 = pb[l/2] >> 4;
+
+ hist[vi0]++;
+ hist[vi1]++;
+ }
+ pb += bs;
+ }
+ }
+
+ return (n/k)*row_size;
+}
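+
+// the only layout difference from Q4_0: each Q4_1 block stores two floats
+// (the scale and, by convention, the block minimum) ahead of the packed
+// nibbles, hence bs = 2*sizeof(float) + qk/2 bytes per block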
+
+////////////////////////////////////////////////////////////////////////////////
+
int ggml_cpu_has_avx(void) {
#if defined(__AVX__)
return 1;