ggml.c
ggml-alloc.h
ggml-alloc.c
+ ggml-backend.h
+ ggml-backend.c
+ ggml-quants.h
+ ggml-quants.c
${GGML_SOURCES_METAL}
${GGML_SOURCES_CUDA}
${GGML_SOURCES_OPENCL}
ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h
$(CC) $(CFLAGS) -c $< -o $@
-WHISPER_OBJ += ggml-alloc.o
+ggml-backend.o: ggml-backend.c ggml.h ggml-backend.h
+ $(CC) $(CFLAGS) -c $< -o $@
+
+ggml-quants.o: ggml-quants.c ggml.h ggml-quants.h
+ $(CC) $(CFLAGS) -c $< -o $@
+
+WHISPER_OBJ += ggml-alloc.o ggml-backend.o ggml-quants.o
whisper.o: whisper.cpp whisper.h ggml.h ggml-cuda.h
$(CXX) $(CXXFLAGS) -c $< -o $@
-Subproject commit 22a9eef021afc67f2154bc9811ed620b26299d1b
+Subproject commit 44b39fd4ec616a9ce66635e36045372d03dd45e0
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','whisper.h')} .")
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.h')} .")
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml.c')} .")
+system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-impl.h')} .")
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-alloc.h')} .")
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-alloc.c')} .")
+system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend-impl.h')} .")
+system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend.h')} .")
+system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-backend.c')} .")
+system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-quants.h')} .")
+system("cp #{File.join(File.dirname(__FILE__),'..','..','..','ggml-quants.c')} .")
system("cp #{File.join(File.dirname(__FILE__),'..','..','..','examples','dr_wav.h')} .")
--- /dev/null
+#pragma once
+
+// ggml-backend internal header
+
+#include "ggml-backend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ //
+ // Backend buffer
+ //
+
+ typedef void * ggml_backend_buffer_context_t;
+
+ struct ggml_backend_buffer_i {
+ void (*free_buffer) (ggml_backend_buffer_t buffer);
+ void * (*get_base) (ggml_backend_buffer_t buffer); // get base pointer
+ size_t (*get_alloc_size)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-allocation callback
+ void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // post-allocation callback
+ void (*free_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-free callback
+ };
+
+ struct ggml_backend_buffer {
+ struct ggml_backend_buffer_i iface;
+
+ ggml_backend_t backend;
+ ggml_backend_buffer_context_t context;
+
+ size_t size;
+ };
+
+ GGML_API ggml_backend_buffer_t ggml_backend_buffer_init(
+ struct ggml_backend * backend,
+ struct ggml_backend_buffer_i iface,
+ ggml_backend_buffer_context_t context,
+ size_t size);
+
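+    // A minimal sketch (illustrative only, mirroring the CPU implementation in ggml-backend.c)
+    // of wiring a plain heap allocation through this interface; the my_* names are hypothetical:
+    //
+    //     static void   my_free_buffer(ggml_backend_buffer_t buffer) { free(buffer->context); }
+    //     static void * my_get_base   (ggml_backend_buffer_t buffer) { return buffer->context; }
+    //
+    //     static struct ggml_backend_buffer_i my_buffer_i = {
+    //         /* .free_buffer    = */ my_free_buffer,
+    //         /* .get_base       = */ my_get_base,
+    //         /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
+    //         /* .init_tensor    = */ NULL, // optional
+    //         /* .free_tensor    = */ NULL, // optional
+    //     };
+    //
+    //     ggml_backend_buffer_t buf = ggml_backend_buffer_init(backend, my_buffer_i, malloc(size), size);
+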
+ //
+ // Backend
+ //
+
+ typedef void * ggml_backend_context_t;
+
+ struct ggml_backend_i {
+ const char * (*get_name)(ggml_backend_t backend);
+
+ void (*free)(ggml_backend_t backend);
+
+ // buffer allocation
+ ggml_backend_buffer_t (*alloc_buffer)(ggml_backend_t backend, size_t size);
+
+ // get buffer alignment
+ size_t (*get_alignment)(ggml_backend_t backend);
+
+        // tensor data access
+        // these functions may be asynchronous; synchronous helpers that automatically call synchronize are provided
+ void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ void (*synchronize) (ggml_backend_t backend);
+
+        // (optional) copy tensor between different backends, allows for single-copy transfers
+ void (*cpy_tensor_from)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
+ void (*cpy_tensor_to) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
+
+ // compute graph with a plan
+ ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+ void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+ void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+
+ // compute graph without a plan
+ void (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);
+
+ // check if the backend supports an operation
+ bool (*supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
+ };
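+
+    // note: a backend only needs to provide the asynchronous set/get callbacks; the synchronous
+    // ggml_backend_tensor_set/get helpers are built on top of them (see ggml-backend.c), roughly:
+    //
+    //     backend->iface.set_tensor_async(backend, tensor, data, offset, size);
+    //     backend->iface.synchronize(backend);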
+
+ struct ggml_backend {
+ struct ggml_backend_i iface;
+
+ ggml_backend_context_t context;
+ };
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+#include "ggml-backend-impl.h"
+#include "ggml-alloc.h"
+#include "ggml-impl.h"
+
+#include <assert.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define UNUSED GGML_UNUSED
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+// backend buffer
+
+ggml_backend_buffer_t ggml_backend_buffer_init(
+ struct ggml_backend * backend,
+ struct ggml_backend_buffer_i iface,
+ ggml_backend_buffer_context_t context,
+ size_t size) {
+ ggml_backend_buffer_t buffer = malloc(sizeof(struct ggml_backend_buffer));
+
+ GGML_ASSERT(iface.get_base != NULL);
+
+ (*buffer) = (struct ggml_backend_buffer) {
+ /* .iface = */ iface,
+ /* .backend = */ backend,
+ /* .context = */ context,
+ /* .size = */ size,
+ };
+
+ return buffer;
+}
+
+void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
+ if (buffer == NULL) {
+ return;
+ }
+
+ if (buffer->iface.free_buffer != NULL) {
+ buffer->iface.free_buffer(buffer);
+ }
+ free(buffer);
+}
+
+size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) {
+ return ggml_backend_get_alignment(buffer->backend);
+}
+
+size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) {
+ return buffer->size;
+}
+
+void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) {
+ void * base = buffer->iface.get_base(buffer);
+
+ GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL");
+
+ return base;
+}
+
+size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
+ // get_alloc_size is optional, defaults to ggml_nbytes
+ if (buffer->iface.get_alloc_size) {
+ return buffer->iface.get_alloc_size(buffer, tensor);
+ }
+ return ggml_nbytes(tensor);
+}
+
+void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
+ // init_tensor is optional
+ if (buffer->iface.init_tensor) {
+ buffer->iface.init_tensor(buffer, tensor);
+ }
+}
+
+void ggml_backend_buffer_free_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
+ // free_tensor is optional
+ if (buffer->iface.free_tensor) {
+ buffer->iface.free_tensor(buffer, tensor);
+ }
+}
+
+// backend
+
+ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor) {
+ return tensor->buffer ? tensor->buffer->backend : NULL;
+}
+
+const char * ggml_backend_name(ggml_backend_t backend) {
+ if (backend == NULL) {
+ return "NULL";
+ }
+ return backend->iface.get_name(backend);
+}
+
+void ggml_backend_free(ggml_backend_t backend) {
+ if (backend == NULL) {
+ return;
+ }
+
+ backend->iface.free(backend);
+}
+
+ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) {
+ return backend->iface.alloc_buffer(backend, size);
+}
+
+size_t ggml_backend_get_alignment(ggml_backend_t backend) {
+ return backend->iface.get_alignment(backend);
+}
+
+void ggml_backend_tensor_set_async(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ ggml_get_backend(tensor)->iface.set_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size);
+}
+
+void ggml_backend_tensor_get_async(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ ggml_get_backend(tensor)->iface.get_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size);
+}
+
+void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ ggml_backend_t backend = ggml_get_backend(tensor);
+
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+ GGML_ASSERT(backend != NULL && "tensor backend not set");
+
+ backend->iface.set_tensor_async(backend, tensor, data, offset, size);
+ backend->iface.synchronize(backend);
+}
+
+void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ ggml_backend_t backend = ggml_get_backend(tensor);
+
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+ GGML_ASSERT(backend != NULL && "tensor backend not set");
+
+ backend->iface.get_tensor_async(backend, tensor, data, offset, size);
+ backend->iface.synchronize(backend);
+}
+
+void ggml_backend_synchronize(ggml_backend_t backend) {
+ backend->iface.synchronize(backend);
+}
+
+ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ return backend->iface.graph_plan_create(backend, cgraph);
+}
+
+void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+ backend->iface.graph_plan_free(backend, plan);
+}
+
+void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+ backend->iface.graph_plan_compute(backend, plan);
+}
+
+void ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ backend->iface.graph_compute(backend, cgraph);
+}
+
+bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
+ return backend->iface.supports_op(backend, op);
+}
+
+// backend copy
+
+static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
+ if (a->type != b->type) {
+ return false;
+ }
+ for (int i = 0; i < GGML_MAX_DIMS; i++) {
+ if (a->ne[i] != b->ne[i]) {
+ return false;
+ }
+ if (a->nb[i] != b->nb[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) {
+ //printf("src: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", src->name, (int)src->ne[0], (int)src->ne[1], (int)src->ne[2], (int)src->ne[3], (int)src->nb[0], (int)src->nb[1], (int)src->nb[2], (int)src->nb[3]);
+ //printf("dst: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", dst->name, (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], (int)dst->nb[0], (int)dst->nb[1], (int)dst->nb[2], (int)dst->nb[3]);
+ GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");
+
+ // fprintf(stderr, "cpy tensor %s from %s to %s (%lu bytes)\n", src->name, ggml_backend_name(src->backend), ggml_backend_name(dst->backend), ggml_nbytes(src));
+
+ if (src == dst) {
+ return;
+ }
+
+ // TODO: allow backends to support copy to/from same backend
+
+ if (ggml_get_backend(dst)->iface.cpy_tensor_from != NULL) {
+ ggml_get_backend(dst)->iface.cpy_tensor_from(ggml_get_backend(dst), src, dst);
+ } else if (ggml_get_backend(src)->iface.cpy_tensor_to != NULL) {
+ ggml_get_backend(src)->iface.cpy_tensor_to(ggml_get_backend(src), src, dst);
+ } else {
+ // shouldn't be hit when copying from/to CPU
+ #ifndef NDEBUG
+ fprintf(stderr, "ggml_backend_tensor_copy: neither cpy_tensor_from nor cpy_tensor_to is implemented for backends %s and %s, falling back to get/set\n", ggml_backend_name(src->buffer->backend), ggml_backend_name(dst->buffer->backend));
+ #endif
+ size_t nbytes = ggml_nbytes(src);
+ void * data = malloc(nbytes);
+ ggml_backend_tensor_get(src, data, 0, nbytes);
+ ggml_backend_tensor_set(dst, data, 0, nbytes);
+ free(data);
+ }
+}
+
+// backend CPU
+
+struct ggml_backend_cpu_context {
+ int n_threads;
+ void * work_data;
+ size_t work_size;
+};
+
+static const char * ggml_backend_cpu_name(ggml_backend_t backend) {
+ return "CPU";
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_free(ggml_backend_t backend) {
+ struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
+ free(cpu_ctx->work_data);
+ free(cpu_ctx);
+ free(backend);
+}
+
+static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
+ return (void *)buffer->context;
+}
+
+static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+ free(buffer->context);
+}
+
+static struct ggml_backend_buffer_i cpu_backend_buffer_i = {
+ /* .free_buffer = */ ggml_backend_cpu_buffer_free_buffer,
+ /* .get_base = */ ggml_backend_cpu_buffer_get_base,
+ /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
+ /* .init_tensor = */ NULL, // no initialization required
+ /* .free_tensor = */ NULL, // no cleanup required
+};
+
+// for buffers from ptr, free is not called
+static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = {
+ /* .free_buffer = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
+ /* .get_base = */ ggml_backend_cpu_buffer_get_base,
+ /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
+ /* .init_tensor = */ NULL,
+ /* .free_tensor = */ NULL,
+};
+
+static const size_t TENSOR_ALIGNMENT = 64; // should be enough for AVX-512
+
+static ggml_backend_buffer_t ggml_backend_cpu_alloc_buffer(ggml_backend_t backend, size_t size) {
+ size += TENSOR_ALIGNMENT; // malloc may return an address that is not aligned
+ void * data = malloc(size); // TODO: maybe use GGML_ALIGNED_MALLOC?
+
+ GGML_ASSERT(data != NULL && "failed to allocate buffer");
+
+ return ggml_backend_buffer_init(backend, cpu_backend_buffer_i, data, size);
+}
+
+static size_t ggml_backend_cpu_get_alignment(ggml_backend_t backend) {
+ return TENSOR_ALIGNMENT;
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_set_tensor_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+
+ memcpy((char *)tensor->data + offset, data, size);
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_get_tensor_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+
+ memcpy(data, (const char *)tensor->data + offset, size);
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_synchronize(ggml_backend_t backend) {
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_cpy_tensor_from(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) {
+ ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_cpy_tensor_to(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) {
+ ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
+
+ UNUSED(backend);
+}
+
+struct ggml_backend_plan_cpu {
+ struct ggml_cplan cplan;
+ struct ggml_cgraph cgraph;
+};
+
+static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
+
+ struct ggml_backend_plan_cpu * cpu_plan = malloc(sizeof(struct ggml_backend_plan_cpu));
+
+ cpu_plan->cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads);
+ cpu_plan->cgraph = *cgraph;
+
+ if (cpu_plan->cplan.work_size > 0) {
+ cpu_plan->cplan.work_data = malloc(cpu_plan->cplan.work_size);
+ }
+
+ return cpu_plan;
+}
+
+static void ggml_backend_cpu_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+ struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;
+
+ free(cpu_plan->cplan.work_data);
+ free(cpu_plan);
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+ struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;
+
+ ggml_graph_compute(&cpu_plan->cgraph, &cpu_plan->cplan);
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
+
+ struct ggml_cplan cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads);
+
+ if (cpu_ctx->work_size < cplan.work_size) {
+ // TODO: may be faster to free and use malloc to avoid the copy
+ cpu_ctx->work_data = realloc(cpu_ctx->work_data, cplan.work_size);
+ cpu_ctx->work_size = cplan.work_size;
+ }
+
+ cplan.work_data = cpu_ctx->work_data;
+
+ ggml_graph_compute(cgraph, &cplan);
+}
+
+static bool ggml_backend_cpu_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
+ return true;
+ UNUSED(backend);
+ UNUSED(op);
+}
+
+static struct ggml_backend_i cpu_backend_i = {
+ /* .get_name = */ ggml_backend_cpu_name,
+ /* .free = */ ggml_backend_cpu_free,
+ /* .alloc_buffer = */ ggml_backend_cpu_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_cpu_get_alignment,
+ /* .set_tensor_async = */ ggml_backend_cpu_set_tensor_async,
+ /* .get_tensor_async = */ ggml_backend_cpu_get_tensor_async,
+ /* .synchronize = */ ggml_backend_cpu_synchronize,
+ /* .cpy_tensor_from = */ ggml_backend_cpu_cpy_tensor_from,
+ /* .cpy_tensor_to = */ ggml_backend_cpu_cpy_tensor_to,
+ /* .graph_plan_create = */ ggml_backend_cpu_graph_plan_create,
+ /* .graph_plan_free = */ ggml_backend_cpu_graph_plan_free,
+ /* .graph_plan_compute = */ ggml_backend_cpu_graph_plan_compute,
+ /* .graph_compute = */ ggml_backend_cpu_graph_compute,
+ /* .supports_op = */ ggml_backend_cpu_supports_op,
+};
+
+ggml_backend_t ggml_backend_cpu_init(void) {
+ struct ggml_backend_cpu_context * ctx = malloc(sizeof(struct ggml_backend_cpu_context));
+
+ ctx->n_threads = GGML_DEFAULT_N_THREADS;
+ ctx->work_data = NULL;
+ ctx->work_size = 0;
+
+ ggml_backend_t cpu_backend = malloc(sizeof(struct ggml_backend));
+
+ *cpu_backend = (struct ggml_backend) {
+ /* .iface = */ cpu_backend_i,
+ /* .context = */ ctx
+ };
+ return cpu_backend;
+}
+
+bool ggml_backend_is_cpu(ggml_backend_t backend) {
+ return backend->iface.get_name == ggml_backend_cpu_name;
+}
+
+void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) {
+ GGML_ASSERT(ggml_backend_is_cpu(backend_cpu));
+
+ struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *)backend_cpu->context;
+ ctx->n_threads = n_threads;
+}
+
+ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size) {
+ return ggml_backend_buffer_init(backend_cpu, cpu_backend_buffer_i_from_ptr, ptr, size);
+}
+
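+// example (illustrative sketch) of driving the CPU backend end-to-end;
+// graph construction and tensor allocation are elided:
+//
+//     ggml_backend_t cpu = ggml_backend_cpu_init();
+//     ggml_backend_cpu_set_n_threads(cpu, 4);
+//
+//     ggml_backend_buffer_t buf = ggml_backend_alloc_buffer(cpu, buf_size);
+//     // ... allocate tensors in buf (e.g. with ggml-alloc) and build a cgraph gf ...
+//     ggml_backend_graph_compute(cpu, gf);
+//
+//     ggml_backend_buffer_free(buf);
+//     ggml_backend_free(cpu);
+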
+// scheduler
+
+#define GGML_MAX_BACKENDS 4
+#define GGML_MAX_SPLITS 256
+#define GGML_MAX_SPLIT_INPUTS 16
+
+struct ggml_backend_sched_split {
+ ggml_tallocr_t tallocr;
+ int i_start;
+ int i_end;
+ struct ggml_tensor * inputs[GGML_MAX_SPLIT_INPUTS];
+ int n_inputs;
+ struct ggml_cgraph * graph;
+};
+
+struct ggml_backend_sched {
+ int n_backends;
+ ggml_backend_t backends[GGML_MAX_BACKENDS];
+ ggml_tallocr_t tallocs[GGML_MAX_BACKENDS];
+
+ ggml_gallocr_t galloc;
+
+ struct ggml_hash_set hash_set;
+ ggml_tallocr_t * node_talloc; // [hash_set.size]
+ struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // [hash_set.size][GGML_MAX_BACKENDS]
+
+ struct ggml_cgraph * graph;
+ struct ggml_backend_sched_split splits[GGML_MAX_SPLITS];
+ int n_splits;
+
+ struct ggml_context * ctx;
+
+ // align context_buffer to GGML_MEM_ALIGN
+ #ifdef _MSC_VER
+ __declspec(align(GGML_MEM_ALIGN))
+ #else
+ __attribute__((aligned(GGML_MEM_ALIGN)))
+ #endif
+ char context_buffer[GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS*sizeof(struct ggml_tensor) + GGML_MAX_SPLITS*sizeof(struct ggml_cgraph)];
+};
+
+#define hash_id(node) ggml_hash_find_or_insert(sched->hash_set, node)
+#define node_allocr(node) sched->node_talloc[hash_id(node)]
+
+static bool ggml_is_view_op(enum ggml_op op) {
+ return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
+}
+
+// returns the priority of the backend, lower is better
+static int sched_backend_prio(ggml_backend_sched_t sched, ggml_backend_t backend) {
+ for (int i = 0; i < sched->n_backends; i++) {
+ if (sched->backends[i] == backend) {
+ return i;
+ }
+ }
+ return INT_MAX;
+}
+
+static int sched_allocr_prio(ggml_backend_sched_t sched, ggml_tallocr_t allocr) {
+ for (int i = 0; i < sched->n_backends; i++) {
+ if (sched->tallocs[i] == allocr) {
+ return i;
+ }
+ }
+ return INT_MAX;
+}
+
+// debug helper, remove later
+static char causes[GGML_DEFAULT_GRAPH_SIZE*4 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128];
+
+// returns the backend that should be used for the node based on the current locations
+static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * node) {
+ // if the dst tensor is already allocated in a buffer, we must assume that it is critical to keep it there
+ // e.g. KV cache updates
+ // note that this doesn't allow falling back to CPU; to support that, output tensors would have to be added to the splits so that the data can be copied back to the original backend
+ // dst
+ ggml_backend_t cur_backend = ggml_get_backend(node);
+ if (cur_backend != NULL) {
+ sprintf(causes[hash_id(node)], "1.dst");
+ return cur_backend;
+ }
+
+ // view_src
+ if (node->view_src != NULL && ggml_get_backend(node->view_src) != NULL) {
+ sprintf(causes[hash_id(node)], "1.vsrc");
+ return ggml_get_backend(node->view_src);
+ }
+
+ // src
+ int cur_prio = INT_MAX;
+ size_t cur_size = 0;
+
+ for (int i = 0; i < GGML_MAX_SRC; i++) {
+ const struct ggml_tensor * src = node->src[i];
+ if (src == NULL) {
+ break;
+ }
+ ggml_backend_t src_backend = ggml_get_backend(src);
+ if (src_backend != NULL) {
+ int src_prio = sched_backend_prio(sched, src_backend);
+ size_t src_size = ggml_nbytes(src);
+ if (src_prio < cur_prio && src_size >= cur_size) {
+ cur_prio = src_prio;
+ cur_size = src_size;
+ cur_backend = src_backend;
+ sprintf(causes[hash_id(node)], "1.src%d", i);
+ }
+ }
+ }
+ return cur_backend;
+}
+
+static char * fmt_size(size_t size) {
+ static char buffer[128];
+ if (size >= 1024*1024) {
+ sprintf(buffer, "%zuM", size/1024/1024);
+ } else {
+ sprintf(buffer, "%zuK", size/1024);
+ }
+ return buffer;
+}
+
+static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
+ int cur_split = 0;
+ for (int i = 0; i < graph->n_nodes; i++) {
+ if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
+ ggml_backend_t split_backend = ggml_tallocr_get_buffer(sched->splits[cur_split].tallocr)->backend;
+ fprintf(stderr, "\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend), sched->splits[cur_split].n_inputs);
+ for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
+ fprintf(stderr, "[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name, fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
+ }
+ fprintf(stderr, "\n");
+ cur_split++;
+ }
+ struct ggml_tensor * node = graph->nodes[i];
+ if (ggml_is_view_op(node->op)) {
+ continue;
+ }
+ ggml_tallocr_t node_allocr = node_allocr(node);
+ ggml_backend_t node_backend = node_allocr ? ggml_tallocr_get_buffer(node_allocr)->backend : NULL;
+ fprintf(stderr, "node #%3d (%10.10s): %20.20s (%4.4s) [%4.4s %8.8s]:", i, ggml_op_name(node->op), node->name, fmt_size(ggml_nbytes(node)), node_allocr ? ggml_backend_name(node_backend) : "NULL", causes[hash_id(node)]);
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ ggml_tallocr_t src_allocr = node_allocr(src);
+ ggml_backend_t src_backend = src_allocr ? ggml_tallocr_get_buffer(src_allocr)->backend : NULL;
+ fprintf(stderr, " %20.20s (%4.4s) [%4.4s %8.8s]", src->name, fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", causes[hash_id(src)]);
+ }
+ fprintf(stderr, "\n");
+ }
+}
+
+// creates a copy of the tensor with the same memory layout
+static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
+ struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
+ for (int i = 0; i < GGML_MAX_DIMS; i++) {
+ dup->nb[i] = tensor->nb[i];
+ }
+ return dup;
+}
+
+// assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
+// TODO: merge passes
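+// the work is done in four passes (see the pass comments in the body):
+//   1. assign backends to ops whose inputs are already allocated
+//   2. propagate assignments to ops from their assigned srcs
+//   3. assign remaining srcs from their dst (should only be leafs)
+//   4. split the graph and create copies of the inputs that cross backends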
+static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
+ // reset state
+ size_t hash_size = sched->hash_set.size;
+ memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size);
+ memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size);
+ memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size);
+ sched->n_splits = 0;
+
+ struct ggml_init_params params = {
+ /*.mem_size = */ sizeof(sched->context_buffer),
+ /*.mem_buffer = */ sched->context_buffer,
+ /*.no_alloc = */ true
+ };
+
+ if (sched->ctx != NULL) {
+ ggml_free(sched->ctx);
+ }
+
+ sched->ctx = ggml_init(params);
+
+ // pass 1: assign backends to ops with allocated inputs
+ for (int i = 0; i < graph->n_leafs; i++) {
+ struct ggml_tensor * leaf = graph->leafs[i];
+ if (node_allocr(leaf) != NULL) {
+ // do not overwrite user assignments
+ continue;
+ }
+ ggml_backend_t leaf_backend = ggml_get_backend(leaf);
+ if (leaf_backend == NULL && leaf->view_src != NULL) {
+ leaf_backend = ggml_get_backend(leaf->view_src);
+ }
+ if (leaf_backend != NULL) {
+ node_allocr(leaf) = ggml_backend_sched_get_tallocr(sched, leaf_backend);
+ }
+ }
+
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ if (node_allocr(node) != NULL) {
+ // do not overwrite user assignments
+ continue;
+ }
+ ggml_backend_t node_backend = sched_backend_from_cur(sched, node);
+ if (node_backend != NULL) {
+ node_allocr(node) = ggml_backend_sched_get_tallocr(sched, node_backend);
+ }
+ }
+ //printf("PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+
+ // pass 2: assign backends to ops from current assignments
+ // TODO:
+ // - reuse sched_backend_from_cur
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ ggml_tallocr_t node_allocr = node_allocr(node);
+ if (node_allocr == NULL) {
+ int cur_prio = INT_MAX;
+ size_t cur_size = 0;
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ ggml_tallocr_t src_allocr = node_allocr(src);
+ if (src_allocr != NULL) {
+ int src_prio = sched_allocr_prio(sched, src_allocr);
+ size_t src_size = ggml_nbytes(src);
+ if (src_prio < cur_prio && src_size >= cur_size) {
+ cur_prio = src_prio;
+ cur_size = src_size;
+ node_allocr = src_allocr;
+ sprintf(causes[hash_id(node)], "2.src%d", j);
+ }
+ }
+ }
+ if (node_allocr != NULL) {
+ node_allocr(node) = node_allocr;
+ }
+ }
+ }
+ //printf("PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+
+ // pass 3: assign backends to remaining src from dst (should only be leafs)
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ ggml_tallocr_t node_allocr = node_allocr(node);
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ ggml_tallocr_t src_allocr = node_allocr(src);
+ if (src_allocr == NULL) {
+ node_allocr(src) = node_allocr;
+ }
+ }
+ }
+ //printf("PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+
+ // pass 4: split graph, find tensors that need to be copied
+ // TODO:
+ // - when switching from a less preferred backend to a more preferred backend, check if it is possible to move the switch to an earlier point for the same cost
+ // find first backend
+ int cur_split = 0;
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ if (node->view_src == NULL) {
+ sched->splits[0].tallocr = node_allocr(node);
+ break;
+ }
+ }
+ sched->splits[0].i_start = 0;
+ sched->splits[0].n_inputs = 0;
+ memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK
+ ggml_tallocr_t cur_allocr = sched->splits[0].tallocr;
+ size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr);
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+
+ if (ggml_is_view_op(node->op)) {
+ continue;
+ }
+
+ ggml_tallocr_t node_allocr = node_allocr(node);
+
+ if (node_allocr != cur_allocr) {
+ sched->splits[cur_split].i_end = i;
+ cur_split++;
+ GGML_ASSERT(cur_split < GGML_MAX_SPLITS);
+ sched->splits[cur_split].tallocr = node_allocr;
+ sched->splits[cur_split].i_start = i;
+ sched->splits[cur_split].n_inputs = 0;
+ memset(sched->splits[cur_split].inputs, 0, sizeof(sched->splits[cur_split].inputs)); //HACK
+ cur_allocr = node_allocr;
+ cur_backend_id = sched_allocr_prio(sched, cur_allocr);
+ }
+
+ // find inputs that are not on the same backend
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ ggml_tallocr_t src_allocr = node_allocr(src);
+ if (src_allocr != node_allocr) {
+ int n_inputs = sched->splits[cur_split].n_inputs++;
+ GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS);
+ sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src;
+
+ // create copies
+ size_t id = hash_id(src);
+ if (sched->node_copies[id][cur_backend_id] == NULL) {
+ struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
+ sched->node_copies[id][cur_backend_id] = tensor_copy;
+ node_allocr(tensor_copy) = cur_allocr;
+ ggml_backend_t backend = ggml_tallocr_get_buffer(cur_allocr)->backend;
+ ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name);
+ }
+ node->src[j] = sched->node_copies[id][cur_backend_id];
+ }
+ }
+ }
+ sched->splits[cur_split].i_end = graph->n_nodes;
+ sched->n_splits = cur_split + 1;
+
+ //fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); fflush(stdout);
+
+#if 1
+ // sanity check: all sources should have the same backend as the node
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ ggml_tallocr_t node_allocr = node_allocr(node);
+ if (node_allocr == NULL) {
+ fprintf(stderr, "!!!!!!! %s has no backend\n", node->name);
+ }
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ ggml_tallocr_t src_allocr = node_allocr(src);
+ if (src_allocr != node_allocr /* && src_backend != NULL */) { // ignore nulls for now
+ fprintf(stderr, "!!!! %s has backend %s, src %d (%s) has backend %s\n",
+ node->name, node_allocr ? ggml_backend_name(ggml_tallocr_get_buffer(node_allocr)->backend) : "NULL",
+ j, src->name, src_allocr ? ggml_backend_name(ggml_tallocr_get_buffer(src_allocr)->backend) : "NULL");
+ }
+ }
+ }
+#endif
+
+ // create copies of the graph for each split
+ // FIXME: avoid this copy, pass split inputs to ggml_gallocr_alloc_graph_n in some other way
+ struct ggml_cgraph * graph_copy = ggml_new_graph_custom(sched->ctx, graph->n_nodes + sched->n_splits*GGML_MAX_SPLIT_INPUTS, false);
+ for (int i = 0; i < sched->n_splits; i++) {
+ struct ggml_backend_sched_split * split = &sched->splits[i];
+ split->graph = ggml_graph_view(sched->ctx, graph, split->i_start, split->i_end);
+
+ // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
+ for (int j = 0; j < split->n_inputs; j++) {
+ struct ggml_tensor * input = split->inputs[j];
+ struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_allocr_prio(sched, split->tallocr)];
+ input_cpy->src[0] = input;
+ graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
+ }
+
+ for (int j = split->i_start; j < split->i_end; j++) {
+ graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j];
+ }
+ }
+ sched->graph = graph_copy;
+}
+
+static void sched_alloc_splits(ggml_backend_sched_t sched) {
+ ggml_gallocr_alloc_graph_n(
+ sched->galloc,
+ sched->graph,
+ sched->hash_set,
+ sched->node_talloc);
+}
+
+static void sched_compute_splits(ggml_backend_sched_t sched) {
+ uint64_t copy_us[GGML_MAX_BACKENDS] = {0};
+ uint64_t compute_us[GGML_MAX_BACKENDS] = {0};
+
+ struct ggml_backend_sched_split * splits = sched->splits;
+
+ for (int i = 0; i < sched->n_splits; i++) {
+ struct ggml_backend_sched_split * split = &splits[i];
+ ggml_backend_t split_backend = ggml_tallocr_get_buffer(split->tallocr)->backend;
+ int split_backend_id = sched_backend_prio(sched, split_backend);
+
+ // copy the input tensors to the split backend
+ uint64_t copy_start_us = ggml_time_us();
+ for (int j = 0; j < split->n_inputs; j++) {
+ struct ggml_tensor * input_cpy = sched->node_copies[hash_id(split->inputs[j])][sched_backend_prio(sched, split_backend)];
+ if (split->inputs[j]->buffer == NULL) {
+ if (split->inputs[j]->view_src == NULL) {
+ fprintf(stderr, "input %s has no buffer and no view_src\n", split->inputs[j]->name);
+ exit(1);
+ }
+ struct ggml_tensor * view = split->inputs[j];
+ view->backend = view->view_src->backend;
+ view->buffer = view->view_src->buffer;
+ view->data = (char *)view->view_src->data + view->view_offs;
+ ggml_backend_buffer_init_tensor(ggml_backend_sched_get_buffer(sched, view->buffer->backend), view);
+ }
+ if (input_cpy->buffer == NULL) {
+ fprintf(stderr, "input_cpy %s has no buffer\n", input_cpy->name);
+ exit(1);
+ }
+ GGML_ASSERT(split->inputs[j]->buffer->backend != input_cpy->buffer->backend);
+ GGML_ASSERT(input_cpy->buffer->backend == split_backend);
+ ggml_backend_tensor_copy(split->inputs[j], input_cpy);
+ }
+ // ggml_backend_synchronize(split_backend);
+ int64_t copy_end_us = ggml_time_us();
+ copy_us[split_backend_id] += copy_end_us - copy_start_us;
+
+#if 0
+ char split_filename[GGML_MAX_NAME];
+ snprintf(split_filename, GGML_MAX_NAME, "split_%i_%s.dot", i, ggml_backend_name(split_backend));
+ ggml_graph_dump_dot(split->graph, NULL, split_filename);
+#endif
+
+ uint64_t compute_start_us = ggml_time_us();
+ ggml_backend_graph_compute(split_backend, split->graph);
+ // ggml_backend_synchronize(split_backend);
+ uint64_t compute_end_us = ggml_time_us();
+ compute_us[split_backend_id] += compute_end_us - compute_start_us;
+ }
+
+#if 0
+ // per-backend timings
+ fprintf(stderr, "sched_compute_splits times (%d splits):\n", sched->n_splits);
+ for (int i = 0; i < sched->n_backends; i++) {
+ if (copy_us[i] > 0 || compute_us[i] > 0) {
+ fprintf(stderr, "\t%5.5s: %lu us copy, %lu us compute\n", ggml_backend_name(sched->backends[i]), copy_us[i], compute_us[i]);
+ }
+ }
+#endif
+}
+
+static void sched_reset(ggml_backend_sched_t sched) {
+ for (int i = 0; i < sched->n_backends; i++) {
+ ggml_tallocr_reset(sched->tallocs[i]);
+ }
+}
+
+ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends) {
+ GGML_ASSERT(n_backends <= GGML_MAX_BACKENDS);
+
+ struct ggml_backend_sched * sched = malloc(sizeof(struct ggml_backend_sched));
+ memset(sched, 0, sizeof(struct ggml_backend_sched));
+
+ fprintf(stderr, "ggml_backend_sched size: %zu KB\n", sizeof(struct ggml_backend_sched)/1024);
+
+ sched->n_backends = n_backends;
+ for (int i = 0; i < n_backends; i++) {
+ sched->backends[i] = backends[i];
+ }
+
+ sched->galloc = ggml_gallocr_new();
+
+ // init measure allocs for each backend
+ for (int i = 0; i < n_backends; i++) {
+ sched->tallocs[i] = ggml_tallocr_new_measure_from_backend(backends[i]);
+ }
+
+ return sched;
+}
+
+void ggml_backend_sched_free(ggml_backend_sched_t sched) {
+ if (sched == NULL) {
+ return;
+ }
+ for (int i = 0; i < sched->n_backends; i++) {
+ ggml_tallocr_free(sched->tallocs[i]);
+ }
+ ggml_gallocr_free(sched->galloc);
+ free(sched->hash_set.keys);
+ free(sched->node_talloc);
+ free(sched->node_copies);
+ free(sched);
+}
+
+void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
+ // initialize hash tables
+ size_t hash_size = measure_graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS;
+ sched->hash_set.size = hash_size;
+ sched->hash_set.keys = malloc(sizeof(sched->hash_set.keys[0]) * hash_size);
+ sched->node_talloc = malloc(sizeof(sched->node_talloc[0]) * hash_size);
+ sched->node_copies = malloc(sizeof(sched->node_copies[0]) * hash_size);
+
+ sched_split_graph(sched, measure_graph);
+ sched_alloc_splits(sched);
+
+ // allocate buffers and reset allocators
+ for (int i = 0; i < sched->n_backends; i++) {
+ size_t size = ggml_tallocr_max_size(sched->tallocs[i]);
+ ggml_tallocr_free(sched->tallocs[i]);
+ sched->tallocs[i] = ggml_tallocr_new_from_backend(sched->backends[i], size);
+ }
+
+ sched_reset(sched);
+}
+
+void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
+ GGML_ASSERT(sched->hash_set.size >= graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
+
+ sched_split_graph(sched, graph);
+ sched_alloc_splits(sched);
+ sched_compute_splits(sched);
+ sched_reset(sched);
+}
+
+ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend) {
+ int backend_index = sched_backend_prio(sched, backend);
+ return sched->tallocs[backend_index];
+}
+
+ggml_backend_buffer_t ggml_backend_sched_get_buffer(ggml_backend_sched_t sched, ggml_backend_t backend) {
+ int backend_index = sched_backend_prio(sched, backend);
+ return ggml_tallocr_get_buffer(sched->tallocs[backend_index]);
+}
+
+void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) {
+ int backend_index = sched_backend_prio(sched, backend);
+ GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
+ node_allocr(node) = sched->tallocs[backend_index];
+}
--- /dev/null
+#pragma once
+
+#include "ggml.h"
+#include "ggml-alloc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ //
+ // Backend buffer
+ //
+
+ struct ggml_backend_buffer;
+ typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
+
+ // backend buffer functions
+ GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer);
+ GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
+ GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer);
+ GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer);
+ GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+ GGML_API void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+ GGML_API void ggml_backend_buffer_free_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+
+ //
+ // Backend
+ //
+
+ struct ggml_backend;
+ typedef struct ggml_backend * ggml_backend_t;
+ typedef void * ggml_backend_graph_plan_t;
+
+ GGML_API ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor);
+
+ GGML_API const char * ggml_backend_name(ggml_backend_t backend);
+ GGML_API void ggml_backend_free(ggml_backend_t backend);
+
+ GGML_API ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size);
+
+ GGML_API size_t ggml_backend_get_alignment(ggml_backend_t backend);
+
+ GGML_API void ggml_backend_tensor_set_async( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ GGML_API void ggml_backend_tensor_get_async(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+
+ GGML_API void ggml_backend_tensor_set( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ GGML_API void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+
+ GGML_API void ggml_backend_synchronize(ggml_backend_t backend);
+
+ GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+
+ GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+ GGML_API void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+ GGML_API void ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+ GGML_API bool ggml_backend_supports_op (ggml_backend_t backend, const struct ggml_tensor * op);
+
+ // tensor copy between different backends
+ GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst);
+
+ //
+ // CPU backend
+ //
+
+ GGML_API ggml_backend_t ggml_backend_cpu_init(void);
+
+ GGML_API bool ggml_backend_is_cpu(ggml_backend_t backend);
+ GGML_API void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads);
+
+ // Create a backend buffer from an existing pointer
+ GGML_API ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size);
+
+
+ //
+ // Backend scheduler
+ //
+
+ // The backend scheduler allows for multiple backends to be used together
+ // Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends
+ // The backends are selected based on:
+ // - the backend that supports the operation
+ // - the location of the pre-allocated tensors (e.g. the weights)
+ /*
+ Example usage:
+
+ sched = ggml_backend_sched_new((ggml_backend_t[]){ backend_gpu, backend_gpu2, backend_cpu }, num_backends);
+ // sched is initialized with measure allocators and cannot be used until allocated with a measure graph
+
+ // initialize buffers from a measure graph
+ measure_graph = build_graph(sched); // use the allocr to allocate inputs as needed
+
+ // in build_graph:
+ build_graph(...) {
+ // allocating tensors in a specific backend (optional, recommended: pre-allocate inputs in a different buffer)
+ alloc_cpu = ggml_backend_sched_get_tallocr(sched, backend_cpu);
+ ggml_allocr_alloc(alloc_cpu, tensor);
+
+ // manually assigning nodes to a backend (optional, shouldn't be needed in most cases)
+ struct ggml_tensor * node = ggml_mul_mat(ctx, ...);
+ ggml_backend_sched_set_node_backend(sched, node, backend_gpu);
+ }
+
+ // allocate backend buffers from measure graph
+ ggml_backend_sched_init_measure(sched, measure_graph);
+
+ // the scheduler is now ready to compute graphs
+
+ // compute
+ graph = build_graph(sched);
+ ggml_backend_sched_graph_compute(sched, graph);
+ */
+
+ struct ggml_backend_sched;
+ typedef struct ggml_backend_sched * ggml_backend_sched_t;
+
+ // Initialize a backend scheduler
+ GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends);
+
+ GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
+
+ // Initialize backend buffers from a measure graph
+ GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
+
+ GGML_API ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend);
+ GGML_API ggml_backend_buffer_t ggml_backend_sched_get_buffer (ggml_backend_sched_t sched, ggml_backend_t backend);
+
+ GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
+
+ // Allocate and compute a graph on the backend scheduler
+ GGML_API void ggml_backend_sched_graph_compute(
+ ggml_backend_sched_t sched,
+ struct ggml_cgraph * graph);
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+#pragma once
+
+#include "ggml.h"
+
+// GGML internal header
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h> // memcpy
+#include <math.h> // fabsf
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// static_assert is normally provided as a macro by <assert.h>; if it is not,
+// fall back to the C11 _Static_assert keyword.
+// on C99 and older it becomes a no-op.
+// ref: https://stackoverflow.com/a/53923785/4039976
+#ifndef static_assert
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
+#define static_assert(cond, msg) _Static_assert(cond, msg)
+#else
+#define static_assert(cond, msg) struct global_scope_noop_trick
+#endif
+#endif
+
+// __FMA__, __F16C__ and __SSE3__ are not defined in MSVC, however they are implied with AVX2/AVX512
+#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
+#ifndef __FMA__
+#define __FMA__
+#endif
+#ifndef __F16C__
+#define __F16C__
+#endif
+#ifndef __SSE3__
+#define __SSE3__
+#endif
+#endif
+
+#undef MIN
+#undef MAX
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+// 16-bit float
+// on Arm, we use __fp16
+// on x86, we use uint16_t
+#if defined(__ARM_NEON) && !defined(_MSC_VER)
+
+// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
+//
+// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
+//
+#include <arm_neon.h>
+
+#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
+#define GGML_COMPUTE_FP32_TO_FP16(x) (x)
+
+#define GGML_FP16_TO_FP32(x) ((float) (x))
+#define GGML_FP32_TO_FP16(x) (x)
+
+#else
+
+#ifdef __wasm_simd128__
+#include <wasm_simd128.h>
+#else
+#ifdef __POWER9_VECTOR__
+#include <altivec.h>
+#undef bool
+#define bool _Bool
+#else
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#include <intrin.h>
+#else
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
+#if !defined(__riscv)
+#include <immintrin.h>
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifdef __riscv_v_intrinsic
+#include <riscv_vector.h>
+#endif
+
+#ifdef __F16C__
+
+#ifdef _MSC_VER
+#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
+#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
+#else
+#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
+#endif
+
+#elif defined(__POWER9_VECTOR__)
+
+#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+/* the inline asm below is about 12% faster than the lookup method */
+#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
+#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+
+static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
+ register float f;
+ register double d;
+ __asm__(
+ "mtfprd %0,%2\n"
+ "xscvhpdp %0,%0\n"
+ "frsp %1,%0\n" :
+ /* temp */ "=d"(d),
+ /* out */ "=f"(f):
+ /* in */ "r"(h));
+ return f;
+}
+
+static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
+ register double d;
+ register ggml_fp16_t r;
+ __asm__( /* xscvdphp can work on double or single precision */
+ "xscvdphp %0,%2\n"
+ "mffprd %1,%0\n" :
+ /* temp */ "=d"(d),
+ /* out */ "=r"(r):
+ /* in */ "f"(f));
+ return r;
+}
+
+#else
+
+// FP16 <-> FP32
+// ref: https://github.com/Maratyszcza/FP16
+
+static inline float fp32_from_bits(uint32_t w) {
+ union {
+ uint32_t as_bits;
+ float as_value;
+ } fp32;
+ fp32.as_bits = w;
+ return fp32.as_value;
+}
+
+static inline uint32_t fp32_to_bits(float f) {
+ union {
+ float as_value;
+ uint32_t as_bits;
+ } fp32;
+ fp32.as_value = f;
+ return fp32.as_bits;
+}
+
+static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
+ const uint32_t w = (uint32_t) h << 16;
+ const uint32_t sign = w & UINT32_C(0x80000000);
+ const uint32_t two_w = w + w;
+
+ const uint32_t exp_offset = UINT32_C(0xE0) << 23;
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
+ const float exp_scale = 0x1.0p-112f;
+#else
+ const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
+#endif
+ const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
+
+ const uint32_t magic_mask = UINT32_C(126) << 23;
+ const float magic_bias = 0.5f;
+ const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
+
+ const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
+ const uint32_t result = sign |
+ (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
+ return fp32_from_bits(result);
+}
+
+static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
+ const float scale_to_inf = 0x1.0p+112f;
+ const float scale_to_zero = 0x1.0p-110f;
+#else
+ const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
+ const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
+#endif
+ float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
+
+ const uint32_t w = fp32_to_bits(f);
+ const uint32_t shl1_w = w + w;
+ const uint32_t sign = w & UINT32_C(0x80000000);
+ uint32_t bias = shl1_w & UINT32_C(0xFF000000);
+ if (bias < UINT32_C(0x71000000)) {
+ bias = UINT32_C(0x71000000);
+ }
+
+ base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
+ const uint32_t bits = fp32_to_bits(base);
+ const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
+ const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
+ const uint32_t nonsign = exp_bits + mantissa_bits;
+ return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
+}
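+
+// sanity check (illustrative): IEEE-754 binary16 encodes 1.0f as 0x3C00, so values that are
+// exactly representable in half precision round-trip losslessly through these two helpers:
+//
+//     assert(ggml_compute_fp32_to_fp16(1.0f) == 0x3C00);
+//     assert(ggml_compute_fp16_to_fp32(0x3C00) == 1.0f);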
+
+#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+
+#endif // __F16C__
+
+#endif // __ARM_NEON
+
+// precomputed f32 table for f16 (256 KB)
+// defined in ggml.c, initialized in ggml_init()
+extern float ggml_table_f32_f16[1 << 16];
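+// each entry i holds the f32 value of the half-precision number whose bit pattern is i,
+// e.g. ggml_table_f32_f16[0x3C00] == 1.0f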
+
+// On ARM NEON, it is faster to use the native conversion directly instead of calling ggml_lookup_fp16_to_fp32,
+// so GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 are defined above for NEON.
+// The same applies to POWER9.
+#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)
+
+inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
+ uint16_t s;
+ memcpy(&s, &f, sizeof(uint16_t));
+ return ggml_table_f32_f16[s];
+}
+
+#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
+#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+
+#endif
+
+#define GGML_HASHTABLE_FULL ((size_t)-1)
+#define GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)
+
+bool ggml_hash_contains (const struct ggml_hash_set hash_set, struct ggml_tensor * key);
+
+// returns GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted
+size_t ggml_hash_find (const struct ggml_hash_set hash_set, struct ggml_tensor * key);
+
+// returns GGML_HASHTABLE_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
+size_t ggml_hash_insert ( struct ggml_hash_set hash_set, struct ggml_tensor * key);
+
+// return index, asserts if table is full
+size_t ggml_hash_find_or_insert( struct ggml_hash_set hash_set, struct ggml_tensor * key);
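+
+// typical usage (illustrative sketch):
+//
+//     if (!ggml_hash_contains(hash_set, tensor)) {
+//         ggml_hash_insert(hash_set, tensor); // asserts if the table is full
+//     }
+//     // or, in one step:
+//     size_t idx = ggml_hash_find_or_insert(hash_set, tensor);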
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+#include "ggml-quants.h"
+#include "ggml-impl.h"
+
+#include <math.h>
+#include <string.h>
+#include <assert.h>
+#include <float.h>
+
+#ifdef __ARM_NEON
+
+// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
+//
+// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
+//
+#include <arm_neon.h>
+
+#if !defined(__aarch64__)
+inline static int32_t vaddvq_s16(int16x8_t v) {
+ return
+ (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
+ (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
+ (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
+ (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
+}
+
+inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
+ int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
+ int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
+ return vcombine_s16(a0, b0);
+}
+
+inline static int32_t vaddvq_s32(int32x4_t v) {
+ return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
+}
+#endif
+
+#else
+
+#ifdef __wasm_simd128__
+#include <wasm_simd128.h>
+#else
+#ifdef __POWER9_VECTOR__
+#include <altivec.h>
+#undef bool
+#define bool _Bool
+#else
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#include <intrin.h>
+#else
+#if !defined(__riscv) && !defined(__s390__)
+#include <immintrin.h>
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifdef __riscv_v_intrinsic
+#include <riscv_vector.h>
+#endif
+
+#undef MIN
+#undef MAX
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
+
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
+// multiply int8_t, add results pairwise twice
+static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
+ // Get absolute values of x vectors
+ const __m128i ax = _mm_sign_epi8(x, x);
+ // Sign the values of the y vectors
+ const __m128i sy = _mm_sign_epi8(y, x);
+ // Perform multiplication and create 16-bit values
+ const __m128i dot = _mm_maddubs_epi16(ax, sy);
+ const __m128i ones = _mm_set1_epi16(1);
+ return _mm_madd_epi16(ones, dot);
+}
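+
+// scalar equivalent (sketch): with x and y viewed as arrays of 16 signed bytes,
+// output lane k (k = 0..3) holds the sum of x[4k+j]*y[4k+j] for j = 0..3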
+
+#if __AVX__ || __AVX2__ || __AVX512F__
+// horizontally add 8 floats
+static inline float hsum_float_8(const __m256 x) {
+ __m128 res = _mm256_extractf128_ps(x, 1);
+ res = _mm_add_ps(res, _mm256_castps256_ps128(x));
+ res = _mm_add_ps(res, _mm_movehl_ps(res, res));
+ res = _mm_add_ss(res, _mm_movehdup_ps(res));
+ return _mm_cvtss_f32(res);
+}
+
+// horizontally add 8 int32_t
+static inline int hsum_i32_8(const __m256i a) {
+ const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
+ const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
+ const __m128i sum64 = _mm_add_epi32(hi64, sum128);
+ const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
+ return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
+}
+
+// horizontally add 4 int32_t
+static inline int hsum_i32_4(const __m128i a) {
+ const __m128i hi64 = _mm_unpackhi_epi64(a, a);
+ const __m128i sum64 = _mm_add_epi32(hi64, a);
+ const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
+ return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
+}
+
+#if defined(__AVX2__) || defined(__AVX512F__)
+// spread 32 bits to 32 bytes { 0x00, 0xFF }
+static inline __m256i bytes_from_bits_32(const uint8_t * x) {
+ uint32_t x32;
+ memcpy(&x32, x, sizeof(uint32_t));
+ const __m256i shuf_mask = _mm256_set_epi64x(
+ 0x0303030303030303, 0x0202020202020202,
+ 0x0101010101010101, 0x0000000000000000);
+ __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
+ const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
+ bytes = _mm256_or_si256(bytes, bit_mask);
+ return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
+}
+
+// Unpack 32 4-bit fields into 32 bytes
+// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
+static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
+{
+ const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
+ const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
+ const __m256i lowMask = _mm256_set1_epi8( 0xF );
+ return _mm256_and_si256(lowMask, bytes);
+}
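+
+// e.g. input byte 0xAB yields output byte 0x0B at position i and 0x0A at position i + 16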
+
+// add int16_t pairwise and return as float vector
+static inline __m256 sum_i16_pairs_float(const __m256i x) {
+ const __m256i ones = _mm256_set1_epi16(1);
+ const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
+ return _mm256_cvtepi32_ps(summed_pairs);
+}
+
+static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
+#if __AVXVNNI__
+ const __m256i zero = _mm256_setzero_si256();
+ const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
+ return _mm256_cvtepi32_ps(summed_pairs);
+#else
+ // Perform multiplication and create 16-bit values
+ const __m256i dot = _mm256_maddubs_epi16(ax, sy);
+ return sum_i16_pairs_float(dot);
+#endif
+}
+
+// multiply int8_t, add results pairwise twice and return as float vector
+static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
+#if __AVXVNNIINT8__
+ const __m256i zero = _mm256_setzero_si256();
+ const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
+ return _mm256_cvtepi32_ps(summed_pairs);
+#else
+ // Get absolute values of x vectors
+ const __m256i ax = _mm256_sign_epi8(x, x);
+ // Sign the values of the y vectors
+ const __m256i sy = _mm256_sign_epi8(y, x);
+ return mul_sum_us8_pairs_float(ax, sy);
+#endif
+}
+
+static inline __m128i packNibbles( __m256i bytes )
+{
+ // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
+#if __AVX512F__
+ const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000
+ bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh
+ return _mm256_cvtepi16_epi8(bytes); // abcd_efgh
+#else
+ const __m256i lowByte = _mm256_set1_epi16( 0xFF );
+ __m256i high = _mm256_andnot_si256( lowByte, bytes );
+ __m256i low = _mm256_and_si256( lowByte, bytes );
+ high = _mm256_srli_epi16( high, 4 );
+ bytes = _mm256_or_si256( low, high );
+
+ // Compress uint16_t lanes into bytes
+ __m128i r0 = _mm256_castsi256_si128( bytes );
+ __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
+ return _mm_packus_epi16( r0, r1 );
+#endif
+}
+#elif defined(__AVX__)
+// spread 32 bits to 32 bytes { 0x00, 0xFF }
+static inline __m256i bytes_from_bits_32(const uint8_t * x) {
+ uint32_t x32;
+ memcpy(&x32, x, sizeof(uint32_t));
+ const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
+ const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
+ __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
+ __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
+ const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
+ bytesl = _mm_or_si128(bytesl, bit_mask);
+ bytesh = _mm_or_si128(bytesh, bit_mask);
+ bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
+ bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
+ return MM256_SET_M128I(bytesh, bytesl);
+}
+
+// Unpack 32 4-bit fields into 32 bytes
+// The output vector contains 32 bytes, each one in the [0 .. 15] interval
+static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
+{
+ // Load 16 bytes from memory
+ __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
+ __m128i tmph = _mm_srli_epi16(tmpl, 4);
+ const __m128i lowMask = _mm_set1_epi8(0xF);
+ tmpl = _mm_and_si128(lowMask, tmpl);
+ tmph = _mm_and_si128(lowMask, tmph);
+ return MM256_SET_M128I(tmph, tmpl);
+}
+
+// add int16_t pairwise and return as float vector
+static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
+ const __m128i ones = _mm_set1_epi16(1);
+ const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
+ const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
+ const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
+ return _mm256_cvtepi32_ps(summed_pairs);
+}
+
+static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
+ const __m128i axl = _mm256_castsi256_si128(ax);
+ const __m128i axh = _mm256_extractf128_si256(ax, 1);
+ const __m128i syl = _mm256_castsi256_si128(sy);
+ const __m128i syh = _mm256_extractf128_si256(sy, 1);
+ // Perform multiplication and create 16-bit values
+ const __m128i dotl = _mm_maddubs_epi16(axl, syl);
+ const __m128i doth = _mm_maddubs_epi16(axh, syh);
+ return sum_i16_pairs_float(doth, dotl);
+}
+
+// multiply int8_t, add results pairwise twice and return as float vector
+static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
+ const __m128i xl = _mm256_castsi256_si128(x);
+ const __m128i xh = _mm256_extractf128_si256(x, 1);
+ const __m128i yl = _mm256_castsi256_si128(y);
+ const __m128i yh = _mm256_extractf128_si256(y, 1);
+ // Get absolute values of x vectors
+ const __m128i axl = _mm_sign_epi8(xl, xl);
+ const __m128i axh = _mm_sign_epi8(xh, xh);
+ // Sign the values of the y vectors
+ const __m128i syl = _mm_sign_epi8(yl, xl);
+ const __m128i syh = _mm_sign_epi8(yh, xh);
+ // Perform multiplication and create 16-bit values
+ const __m128i dotl = _mm_maddubs_epi16(axl, syl);
+ const __m128i doth = _mm_maddubs_epi16(axh, syh);
+ return sum_i16_pairs_float(doth, dotl);
+}
+
+static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
+{
+ // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
+ const __m128i lowByte = _mm_set1_epi16( 0xFF );
+ __m128i high = _mm_andnot_si128( lowByte, bytes1 );
+ __m128i low = _mm_and_si128( lowByte, bytes1 );
+ high = _mm_srli_epi16( high, 4 );
+ bytes1 = _mm_or_si128( low, high );
+ high = _mm_andnot_si128( lowByte, bytes2 );
+ low = _mm_and_si128( lowByte, bytes2 );
+ high = _mm_srli_epi16( high, 4 );
+ bytes2 = _mm_or_si128( low, high );
+
+ return _mm_packus_epi16( bytes1, bytes2);
+}
+#endif
+#elif defined(__SSSE3__)
+// horizontally add 4x4 floats
+static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
+ __m128 res_0 =_mm_hadd_ps(a, b);
+ __m128 res_1 =_mm_hadd_ps(c, d);
+ __m128 res =_mm_hadd_ps(res_0, res_1);
+ res =_mm_hadd_ps(res, res);
+ res =_mm_hadd_ps(res, res);
+
+ return _mm_cvtss_f32(res);
+}
+#endif // __AVX__ || __AVX2__ || __AVX512F__
+#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
+
+#if defined(__ARM_NEON)
+
+#if !defined(__aarch64__)
+
+inline static int32_t vaddvq_s32(int32x4_t v) {
+ return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
+}
+
+inline static float vaddvq_f32(float32x4_t v) {
+ return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
+}
+
+inline static float vmaxvq_f32(float32x4_t v) {
+ return
+ MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
+ MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
+}
+
+inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
+ int32x4_t res;
+
+ res[0] = roundf(vgetq_lane_f32(v, 0));
+ res[1] = roundf(vgetq_lane_f32(v, 1));
+ res[2] = roundf(vgetq_lane_f32(v, 2));
+ res[3] = roundf(vgetq_lane_f32(v, 3));
+
+ return res;
+}
+
+#endif
+#endif
+
+#if defined(__ARM_NEON) || defined(__wasm_simd128__)
+#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s
+#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
+#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
+#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
+#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
+#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
+#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
+#define B8(c,s ) B7(c,s, c), B7(c,s, s)
+
+// precomputed tables for expanding 8 bits to 8 bytes:
+static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
+static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
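+// e.g. table_b2b_0[0x05] == 0x0000000000100010: bit j of the index becomes byte j, shifted left by 4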
+#endif
+
+// reference implementation for deterministic creation of model files
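+//
+// Q4_0: each block of QK4_0 = 32 floats is stored as one fp16 scale d plus 32 4-bit quants,
+// reconstructed as d * (q - 8). The scale is chosen so the element with the largest magnitude
+// maps to -8, hence d = max / -8 (see dequantize_row_q4_0 below).
+// Worked example: max = -2.0f gives d = 0.25f and id = 4.0f, so x = 1.0f quantizes to
+// (int8_t)(1.0f*4.0f + 8.5f) = 12 and dequantizes to 0.25f * (12 - 8) = 1.0f.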
+void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
+ static const int qk = QK4_0;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+ float max = 0.0f;
+
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
+ if (amax < fabsf(v)) {
+ amax = fabsf(v);
+ max = v;
+ }
+ }
+
+ const float d = max / -8;
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = x[i*qk + 0 + j]*id;
+ const float x1 = x[i*qk + qk/2 + j]*id;
+
+ const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
+ const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
+
+ y[i].qs[j] = xi0;
+ y[i].qs[j] |= xi1 << 4;
+ }
+ }
+}
+
+void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q4_0_reference(x, y, k);
+}
+
+void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
+ const int qk = QK4_1;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ float min = FLT_MAX;
+ float max = -FLT_MAX;
+
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
+
+ if (v < min) min = v;
+ if (v > max) max = v;
+ }
+
+ const float d = (max - min) / ((1 << 4) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+ y[i].m = GGML_FP32_TO_FP16(min);
+
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = (x[i*qk + 0 + j] - min)*id;
+ const float x1 = (x[i*qk + qk/2 + j] - min)*id;
+
+ const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
+ const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
+
+ y[i].qs[j] = xi0;
+ y[i].qs[j] |= xi1 << 4;
+ }
+ }
+}
+
+void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q4_1_reference(x, y, k);
+}
+
+void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
+ static const int qk = QK5_0;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+ float max = 0.0f;
+
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
+ if (amax < fabsf(v)) {
+ amax = fabsf(v);
+ max = v;
+ }
+ }
+
+ const float d = max / -16;
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ uint32_t qh = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = x[i*qk + 0 + j]*id;
+ const float x1 = x[i*qk + qk/2 + j]*id;
+
+ const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
+ const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
+
+ y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
+
+            // get the 5th bit and store it in qh at the right position
+ qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
+ }
+
+ memcpy(&y[i].qh, &qh, sizeof(qh));
+ }
+}
+
+void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q5_0_reference(x, y, k);
+}
+
+void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
+ const int qk = QK5_1;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ float min = FLT_MAX;
+ float max = -FLT_MAX;
+
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
+
+ if (v < min) min = v;
+ if (v > max) max = v;
+ }
+
+ const float d = (max - min) / ((1 << 5) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+ y[i].m = GGML_FP32_TO_FP16(min);
+
+ uint32_t qh = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = (x[i*qk + 0 + j] - min)*id;
+ const float x1 = (x[i*qk + qk/2 + j] - min)*id;
+
+ const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
+ const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
+
+ y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
+
+            // get the 5th bit and store it in qh at the right position
+ qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
+ }
+
+ memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
+ }
+}
+
+void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q5_1_reference(x, y, k);
+}
+
+// reference implementation for deterministic creation of model files
+void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
+ assert(k % QK8_0 == 0);
+ const int nb = k / QK8_0;
+
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+
+ for (int j = 0; j < QK8_0; j++) {
+ const float v = x[i*QK8_0 + j];
+ amax = MAX(amax, fabsf(v));
+ }
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ for (int j = 0; j < QK8_0; ++j) {
+ const float x0 = x[i*QK8_0 + j]*id;
+
+ y[i].qs[j] = roundf(x0);
+ }
+ }
+}
+
+void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
+ assert(QK8_0 == 32);
+ assert(k % QK8_0 == 0);
+ const int nb = k / QK8_0;
+
+ block_q8_0 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ for (int i = 0; i < nb; i++) {
+ float32x4_t srcv [8];
+ float32x4_t asrcv[8];
+ float32x4_t amaxv[8];
+
+ for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
+
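+ // pairwise tree reduction of the 8 |x| vectors: 8 -> 4 -> 2 -> 1, then one horizontal max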
+ for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
+
+ const float amax = vmaxvq_f32(amaxv[0]);
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ for (int j = 0; j < 8; j++) {
+ const float32x4_t v = vmulq_n_f32(srcv[j], id);
+ const int32x4_t vi = vcvtnq_s32_f32(v);
+
+ y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
+ y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
+ y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
+ y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
+ }
+ }
+#elif defined(__wasm_simd128__)
+ for (int i = 0; i < nb; i++) {
+ v128_t srcv [8];
+ v128_t asrcv[8];
+ v128_t amaxv[8];
+
+ for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
+
+ for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
+
+ const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
+ wasm_f32x4_extract_lane(amaxv[0], 1)),
+ MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
+ wasm_f32x4_extract_lane(amaxv[0], 3)));
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ for (int j = 0; j < 8; j++) {
+ const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
+ const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
+
+ y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
+ y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
+ y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
+ y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
+ }
+ }
+#elif defined(__AVX2__) || defined(__AVX__)
+ for (int i = 0; i < nb; i++) {
+ // Load elements into 4 AVX vectors
+ __m256 v0 = _mm256_loadu_ps( x );
+ __m256 v1 = _mm256_loadu_ps( x + 8 );
+ __m256 v2 = _mm256_loadu_ps( x + 16 );
+ __m256 v3 = _mm256_loadu_ps( x + 24 );
+ x += 32;
+
+ // Compute max(abs(e)) for the block
+ const __m256 signBit = _mm256_set1_ps( -0.0f );
+ __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
+
+ __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
+ max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
+ max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
+ const float maxScalar = _mm_cvtss_f32( max4 );
+
+ // Quantize these floats
+ const float d = maxScalar / 127.f;
+ y[i].d = GGML_FP32_TO_FP16(d);
+ const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
+ const __m256 mul = _mm256_set1_ps( id );
+
+ // Apply the multiplier
+ v0 = _mm256_mul_ps( v0, mul );
+ v1 = _mm256_mul_ps( v1, mul );
+ v2 = _mm256_mul_ps( v2, mul );
+ v3 = _mm256_mul_ps( v3, mul );
+
+ // Round to nearest integer
+ v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
+ v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
+ v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
+ v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
+
+ // Convert floats to integers
+ __m256i i0 = _mm256_cvtps_epi32( v0 );
+ __m256i i1 = _mm256_cvtps_epi32( v1 );
+ __m256i i2 = _mm256_cvtps_epi32( v2 );
+ __m256i i3 = _mm256_cvtps_epi32( v3 );
+
+#if defined(__AVX2__)
+ // Convert int32 to int16
+ i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
+ i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
+ // Convert int16 to int8
+ i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+
+ // We got our precious signed bytes, but the order is now wrong
+ // These AVX2 pack instructions process 16-byte pieces independently
+        // The following instruction fixes the order
+ const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
+ i0 = _mm256_permutevar8x32_epi32( i0, perm );
+
+ _mm256_storeu_si256((__m256i *)y[i].qs, i0);
+#else
+        // Since AVX lacks some of the intrinsics we need,
+        // we split the registers in half and use the SSE analogs
+ __m128i ni0 = _mm256_castsi256_si128( i0 );
+ __m128i ni1 = _mm256_extractf128_si256( i0, 1);
+ __m128i ni2 = _mm256_castsi256_si128( i1 );
+ __m128i ni3 = _mm256_extractf128_si256( i1, 1);
+ __m128i ni4 = _mm256_castsi256_si128( i2 );
+ __m128i ni5 = _mm256_extractf128_si256( i2, 1);
+ __m128i ni6 = _mm256_castsi256_si128( i3 );
+ __m128i ni7 = _mm256_extractf128_si256( i3, 1);
+
+ // Convert int32 to int16
+ ni0 = _mm_packs_epi32( ni0, ni1 );
+ ni2 = _mm_packs_epi32( ni2, ni3 );
+ ni4 = _mm_packs_epi32( ni4, ni5 );
+ ni6 = _mm_packs_epi32( ni6, ni7 );
+ // Convert int16 to int8
+ ni0 = _mm_packs_epi16( ni0, ni2 );
+ ni4 = _mm_packs_epi16( ni4, ni6 );
+
+ _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
+ _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
+#endif
+ }
+#elif defined(__riscv_v_intrinsic)
+
+ size_t vl = __riscv_vsetvl_e32m4(QK8_0);
+
+ for (int i = 0; i < nb; i++) {
+ // load elements
+ vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_0, vl);
+
+ vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
+ vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl);
+ vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
+ float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
+
+ // convert to integer
+ vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
+ vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
+
+ // store result
+ __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
+ }
+#else
+ GGML_UNUSED(nb);
+ // scalar
+ quantize_row_q8_0_reference(x, y, k);
+#endif
+}
+
+// reference implementation for deterministic creation of model files
+void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
+ assert(QK8_1 == 32);
+ assert(k % QK8_1 == 0);
+ const int nb = k / QK8_1;
+
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+
+ for (int j = 0; j < QK8_1; j++) {
+ const float v = x[i*QK8_1 + j];
+ amax = MAX(amax, fabsf(v));
+ }
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = d;
+
+ int sum = 0;
+
+ for (int j = 0; j < QK8_1/2; ++j) {
+ const float v0 = x[i*QK8_1 + j]*id;
+ const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;
+
+ y[i].qs[ j] = roundf(v0);
+ y[i].qs[QK8_1/2 + j] = roundf(v1);
+
+ sum += y[i].qs[ j];
+ sum += y[i].qs[QK8_1/2 + j];
+ }
+
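+        // cache d * sum(q) so the dot-product kernels can fold in the per-block offset
+        // without re-summing the quants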
+ y[i].s = sum*d;
+ }
+}
+
+void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
+ assert(k % QK8_1 == 0);
+ const int nb = k / QK8_1;
+
+ block_q8_1 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ for (int i = 0; i < nb; i++) {
+ float32x4_t srcv [8];
+ float32x4_t asrcv[8];
+ float32x4_t amaxv[8];
+
+ for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
+
+ for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
+
+ const float amax = vmaxvq_f32(amaxv[0]);
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = d;
+
+ int32x4_t accv = vdupq_n_s32(0);
+
+ for (int j = 0; j < 8; j++) {
+ const float32x4_t v = vmulq_n_f32(srcv[j], id);
+ const int32x4_t vi = vcvtnq_s32_f32(v);
+
+ y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
+ y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
+ y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
+ y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
+
+ accv = vaddq_s32(accv, vi);
+ }
+
+ y[i].s = d * vaddvq_s32(accv);
+ }
+#elif defined(__wasm_simd128__)
+ for (int i = 0; i < nb; i++) {
+ v128_t srcv [8];
+ v128_t asrcv[8];
+ v128_t amaxv[8];
+
+ for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
+
+ for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
+
+ const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
+ wasm_f32x4_extract_lane(amaxv[0], 1)),
+ MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
+ wasm_f32x4_extract_lane(amaxv[0], 3)));
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = d;
+
+ v128_t accv = wasm_i32x4_splat(0);
+
+ for (int j = 0; j < 8; j++) {
+ const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
+ const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
+
+ y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
+ y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
+ y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
+ y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
+
+ accv = wasm_i32x4_add(accv, vi);
+ }
+
+ y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
+ wasm_i32x4_extract_lane(accv, 1) +
+ wasm_i32x4_extract_lane(accv, 2) +
+ wasm_i32x4_extract_lane(accv, 3));
+ }
+#elif defined(__AVX2__) || defined(__AVX__)
+ for (int i = 0; i < nb; i++) {
+ // Load elements into 4 AVX vectors
+ __m256 v0 = _mm256_loadu_ps( x );
+ __m256 v1 = _mm256_loadu_ps( x + 8 );
+ __m256 v2 = _mm256_loadu_ps( x + 16 );
+ __m256 v3 = _mm256_loadu_ps( x + 24 );
+ x += 32;
+
+ // Compute max(abs(e)) for the block
+ const __m256 signBit = _mm256_set1_ps( -0.0f );
+ __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
+
+ __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
+ max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
+ max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
+ const float maxScalar = _mm_cvtss_f32( max4 );
+
+ // Quantize these floats
+ const float d = maxScalar / 127.f;
+ y[i].d = d;
+ const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
+ const __m256 mul = _mm256_set1_ps( id );
+
+ // Apply the multiplier
+ v0 = _mm256_mul_ps( v0, mul );
+ v1 = _mm256_mul_ps( v1, mul );
+ v2 = _mm256_mul_ps( v2, mul );
+ v3 = _mm256_mul_ps( v3, mul );
+
+ // Round to nearest integer
+ v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
+ v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
+ v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
+ v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
+
+ // Convert floats to integers
+ __m256i i0 = _mm256_cvtps_epi32( v0 );
+ __m256i i1 = _mm256_cvtps_epi32( v1 );
+ __m256i i2 = _mm256_cvtps_epi32( v2 );
+ __m256i i3 = _mm256_cvtps_epi32( v3 );
+
+#if defined(__AVX2__)
+ // Compute the sum of the quants and set y[i].s
+ y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));
+
+ // Convert int32 to int16
+ i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
+ i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
+ // Convert int16 to int8
+ i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+
+ // We got our precious signed bytes, but the order is now wrong
+ // These AVX2 pack instructions process 16-byte pieces independently
+        // The following instruction fixes the order
+ const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
+ i0 = _mm256_permutevar8x32_epi32( i0, perm );
+
+ _mm256_storeu_si256((__m256i *)y[i].qs, i0);
+#else
+        // Since AVX lacks some of the intrinsics we need,
+        // we split the registers in half and use the SSE analogs
+ __m128i ni0 = _mm256_castsi256_si128( i0 );
+ __m128i ni1 = _mm256_extractf128_si256( i0, 1);
+ __m128i ni2 = _mm256_castsi256_si128( i1 );
+ __m128i ni3 = _mm256_extractf128_si256( i1, 1);
+ __m128i ni4 = _mm256_castsi256_si128( i2 );
+ __m128i ni5 = _mm256_extractf128_si256( i2, 1);
+ __m128i ni6 = _mm256_castsi256_si128( i3 );
+ __m128i ni7 = _mm256_extractf128_si256( i3, 1);
+
+ // Compute the sum of the quants and set y[i].s
+ const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
+ const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
+ y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));
+
+ // Convert int32 to int16
+ ni0 = _mm_packs_epi32( ni0, ni1 );
+ ni2 = _mm_packs_epi32( ni2, ni3 );
+ ni4 = _mm_packs_epi32( ni4, ni5 );
+ ni6 = _mm_packs_epi32( ni6, ni7 );
+ // Convert int16 to int8
+ ni0 = _mm_packs_epi16( ni0, ni2 );
+ ni4 = _mm_packs_epi16( ni4, ni6 );
+
+ _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
+ _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
+#endif
+ }
+#elif defined(__riscv_v_intrinsic)
+
+ size_t vl = __riscv_vsetvl_e32m4(QK8_1);
+
+ for (int i = 0; i < nb; i++) {
+ // load elements
+ vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_1, vl);
+
+ vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
+ vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl);
+ vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
+ float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = d;
+
+ vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
+
+ // convert to integer
+ vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
+ vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
+
+ // store result
+ __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
+
+ // compute sum for y[i].s
+ vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl);
+ vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl);
+
+ // set y[i].s
+ int sum = __riscv_vmv_x_s_i16m1_i16(vwrs);
+ y[i].s = sum*d;
+ }
+#else
+ GGML_UNUSED(nb);
+ // scalar
+ quantize_row_q8_1_reference(x, y, k);
+#endif
+}
+
+void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
+ static const int qk = QK4_0;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+
+ for (int j = 0; j < qk/2; ++j) {
+ const int x0 = (x[i].qs[j] & 0x0F) - 8;
+ const int x1 = (x[i].qs[j] >> 4) - 8;
+
+ y[i*qk + j + 0 ] = x0*d;
+ y[i*qk + j + qk/2] = x1*d;
+ }
+ }
+}
+
+void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
+ static const int qk = QK4_1;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const float m = GGML_FP16_TO_FP32(x[i].m);
+
+ for (int j = 0; j < qk/2; ++j) {
+ const int x0 = (x[i].qs[j] & 0x0F);
+ const int x1 = (x[i].qs[j] >> 4);
+
+ y[i*qk + j + 0 ] = x0*d + m;
+ y[i*qk + j + qk/2] = x1*d + m;
+ }
+ }
+}
+
+void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
+ static const int qk = QK5_0;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+
+ uint32_t qh;
+ memcpy(&qh, x[i].qh, sizeof(qh));
+
+ for (int j = 0; j < qk/2; ++j) {
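+            // the 5th bits live in qh at positions j (low half) and j + 16 (high half);
+            // shifting by j and by j + 12 lands each of them in bit 4 of xh_0/xh_1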
+ const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
+
+ const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
+ const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
+
+ y[i*qk + j + 0 ] = x0*d;
+ y[i*qk + j + qk/2] = x1*d;
+ }
+ }
+}
+
+void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
+ static const int qk = QK5_1;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const float m = GGML_FP16_TO_FP32(x[i].m);
+
+ uint32_t qh;
+ memcpy(&qh, x[i].qh, sizeof(qh));
+
+ for (int j = 0; j < qk/2; ++j) {
+ const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
+
+ const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
+ const int x1 = (x[i].qs[j] >> 4) | xh_1;
+
+ y[i*qk + j + 0 ] = x0*d + m;
+ y[i*qk + j + qk/2] = x1*d + m;
+ }
+ }
+}
+
+void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k) {
+ static const int qk = QK8_0;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+
+ for (int j = 0; j < qk; ++j) {
+ y[i*qk + j] = x[i].qs[j]*d;
+ }
+ }
+}
+
+//
+// 2-6 bit quantization in super-blocks
+//
+
+//
+// ===================== Helper functions
+//
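+// nearest_int rounds via the float representation: adding 1.5f * 2^23 (12582912.0f) pushes the
+// sum into a binade where the ulp is 1, so the hardware rounds fval to the nearest integer in
+// the mantissa; the low 23 bits then hold round(fval) + 0x00400000, and subtracting that bias
+// recovers the integer. This only holds while fval stays within +/- (2^22 - 1), hence the assert.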
+static inline int nearest_int(float fval) {
+ assert(fval <= 4194303.f);
+ float val = fval + 12582912.f;
+ int i; memcpy(&i, &val, sizeof(int));
+ return (i & 0x007fffff) - 0x00400000;
+}
+
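+// make_qx_quants: fit a single scale so that x[i] ~= scale * (L[i] - nmax), minimizing an
+// (optionally x^2-weighted) squared error selected by rmse_type; the loop over `is` below probes
+// a few perturbed scales around the initial -nmax/max guess to escape unlucky roundings.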
+static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type) {
+ float max = 0;
+ float amax = 0;
+ for (int i = 0; i < n; ++i) {
+ float ax = fabsf(x[i]);
+ if (ax > amax) { amax = ax; max = x[i]; }
+ }
+ if (amax < 1e-30f) { // all zero
+ for (int i = 0; i < n; ++i) {
+ L[i] = 0;
+ }
+ return 0.f;
+ }
+ float iscale = -nmax / max;
+ if (rmse_type == 0) {
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
+ }
+ return 1/iscale;
+ }
+ bool return_early = false;
+ if (rmse_type < 0) {
+ rmse_type = -rmse_type;
+ return_early = true;
+ }
+ int weight_type = rmse_type%2;
+ float sumlx = 0;
+ float suml2 = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ l = MAX(-nmax, MIN(nmax-1, l));
+ L[i] = l + nmax;
+ float w = weight_type == 1 ? x[i] * x[i] : 1;
+ sumlx += w*x[i]*l;
+ suml2 += w*l*l;
+ }
+ float scale = sumlx/suml2;
+ if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale;
+ float best = scale * sumlx;
+ for (int is = -9; is <= 9; ++is) {
+ if (is == 0) {
+ continue;
+ }
+ iscale = -(nmax + 0.1f*is) / max;
+ sumlx = suml2 = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ l = MAX(-nmax, MIN(nmax-1, l));
+ float w = weight_type == 1 ? x[i] * x[i] : 1;
+ sumlx += w*x[i]*l;
+ suml2 += w*l*l;
+ }
+ if (suml2 > 0 && sumlx*sumlx > best*suml2) {
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
+ }
+ scale = sumlx/suml2; best = scale*sumlx;
+ }
+ }
+ return scale;
+}
+
+static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) {
+ float max = 0;
+ float amax = 0;
+ for (int i = 0; i < n; ++i) {
+ float ax = fabsf(x[i]);
+ if (ax > amax) { amax = ax; max = x[i]; }
+ }
+ if (!amax) { // all zero
+ for (int i = 0; i < n; ++i) { L[i] = 0; }
+ return 0.f;
+ }
+ float iscale = -nmax / max;
+ if (do_rmse) {
+ float sumlx = 0;
+ float suml2 = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ l = MAX(-nmax, MIN(nmax-1, l));
+ L[i] = l;
+ float w = x[i]*x[i];
+ sumlx += w*x[i]*l;
+ suml2 += w*l*l;
+ }
+ for (int itry = 0; itry < 5; ++itry) {
+ int n_changed = 0;
+ for (int i = 0; i < n; ++i) {
+ float w = x[i]*x[i];
+ float slx = sumlx - w*x[i]*L[i];
+ if (slx > 0) {
+ float sl2 = suml2 - w*L[i]*L[i];
+ int new_l = nearest_int(x[i] * sl2 / slx);
+ new_l = MAX(-nmax, MIN(nmax-1, new_l));
+ if (new_l != L[i]) {
+ slx += w*x[i]*new_l;
+ sl2 += w*new_l*new_l;
+ if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) {
+ L[i] = new_l; sumlx = slx; suml2 = sl2;
+ ++n_changed;
+ }
+ }
+ }
+ }
+ if (!n_changed) {
+ break;
+ }
+ }
+ for (int i = 0; i < n; ++i) {
+ L[i] += nmax;
+ }
+ return sumlx / suml2;
+ }
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ l = MAX(-nmax, MIN(nmax-1, l));
+ L[i] = l + nmax;
+ }
+ return 1/iscale;
+}
+
+static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
+ int ntry, float alpha) {
+ float min = x[0];
+ float max = x[0];
+ for (int i = 1; i < n; ++i) {
+ if (x[i] < min) min = x[i];
+ if (x[i] > max) max = x[i];
+ }
+ if (max == min) {
+ for (int i = 0; i < n; ++i) L[i] = 0;
+ *the_min = 0;
+ return 0.f;
+ }
+ if (min > 0) min = 0;
+ float iscale = nmax/(max - min);
+ float scale = 1/iscale;
+ for (int itry = 0; itry < ntry; ++itry) {
+ float sumlx = 0; int suml2 = 0;
+ bool did_change = false;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale*(x[i] - min));
+ l = MAX(0, MIN(nmax, l));
+ if (l != L[i]) {
+ L[i] = l;
+ did_change = true;
+ }
+ sumlx += (x[i] - min)*l;
+ suml2 += l*l;
+ }
+ scale = sumlx/suml2;
+ float sum = 0;
+ for (int i = 0; i < n; ++i) {
+ sum += x[i] - scale*L[i];
+ }
+ min = alpha*min + (1 - alpha)*sum/n;
+ if (min > 0) min = 0;
+ iscale = 1/scale;
+ if (!did_change) break;
+ }
+ *the_min = -min;
+ return scale;
+}
+
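+// make_qkx2_quants: fit x[i] ~= scale * L[i] + min with L[i] in [0, nmax] via a weighted
+// least-squares solve for (scale, min) at each of nstep candidate scales; use_mad picks the
+// selection metric (mean-absolute vs squared deviation), and min is clamped to <= 0 so that
+// the returned *the_min = -min is non-negative.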
+static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
+ uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
+ float rmin, float rdelta, int nstep, bool use_mad) {
+ float min = x[0];
+ float max = x[0];
+ float sum_w = weights[0];
+ float sum_x = sum_w * x[0];
+ for (int i = 1; i < n; ++i) {
+ if (x[i] < min) min = x[i];
+ if (x[i] > max) max = x[i];
+ float w = weights[i];
+ sum_w += w;
+ sum_x += w * x[i];
+ }
+ if (min > 0) min = 0;
+ if (max == min) {
+ for (int i = 0; i < n; ++i) L[i] = 0;
+ *the_min = -min;
+ return 0.f;
+ }
+ float iscale = nmax/(max - min);
+ float scale = 1/iscale;
+ float best_mad = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale*(x[i] - min));
+ L[i] = MAX(0, MIN(nmax, l));
+ float diff = scale * L[i] + min - x[i];
+ diff = use_mad ? fabsf(diff) : diff * diff;
+ float w = weights[i];
+ best_mad += w * diff;
+ }
+ if (nstep < 1) {
+ *the_min = -min;
+ return scale;
+ }
+ for (int is = 0; is <= nstep; ++is) {
+ iscale = (rmin + rdelta*is + nmax)/(max - min);
+ float sum_l = 0, sum_l2 = 0, sum_xl = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale*(x[i] - min));
+ l = MAX(0, MIN(nmax, l));
+ Laux[i] = l;
+ float w = weights[i];
+ sum_l += w*l;
+ sum_l2 += w*l*l;
+ sum_xl += w*l*x[i];
+ }
+ float D = sum_w * sum_l2 - sum_l * sum_l;
+ if (D > 0) {
+ float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
+ float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
+ if (this_min > 0) {
+ this_min = 0;
+ this_scale = sum_xl / sum_l2;
+ }
+ float mad = 0;
+ for (int i = 0; i < n; ++i) {
+ float diff = this_scale * Laux[i] + this_min - x[i];
+ diff = use_mad ? fabsf(diff) : diff * diff;
+ float w = weights[i];
+ mad += w * diff;
+ }
+ if (mad < best_mad) {
+ for (int i = 0; i < n; ++i) {
+ L[i] = Laux[i];
+ }
+ best_mad = mad;
+ scale = this_scale;
+ min = this_min;
+ }
+ }
+ }
+ *the_min = -min;
+ return scale;
+}
+
+#if QK_K == 256
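+// For QK_K == 256, 8 six-bit scales and 8 six-bit mins are packed into 12 bytes:
+// bytes 0-3 hold scales 0-3 (low 6 bits) plus the top 2 bits of scales 4-7,
+// bytes 4-7 hold mins 0-3 (low 6 bits) plus the top 2 bits of mins 4-7,
+// bytes 8-11 hold the low 4 bits of scales 4-7 (low nibble) and of mins 4-7 (high nibble).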
+static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
+ if (j < 4) {
+ *d = q[j] & 63; *m = q[j + 4] & 63;
+ } else {
+ *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
+ *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
+ }
+}
+#endif
+
+//========================= 2-bit (de)-quantization
+
+void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ uint8_t L[QK_K];
+ uint8_t Laux[16];
+ float weights[16];
+ float mins[QK_K/16];
+ float scales[QK_K/16];
+
+ const float q4scale = 15.f;
+
+ for (int i = 0; i < nb; i++) {
+        float max_scale = 0; // as we are subtracting the min, scales are always positive
+ float max_min = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]);
+ scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true);
+ float scale = scales[j];
+ if (scale > max_scale) {
+ max_scale = scale;
+ }
+ float min = mins[j];
+ if (min > max_min) {
+ max_min = min;
+ }
+ }
+
+ if (max_scale > 0) {
+ float iscale = q4scale/max_scale;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int l = nearest_int(iscale*scales[j]);
+ y[i].scales[j] = l;
+ }
+ y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale);
+ } else {
+ for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0;
+ y[i].d = GGML_FP32_TO_FP16(0.f);
+ }
+ if (max_min > 0) {
+ float iscale = q4scale/max_min;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int l = nearest_int(iscale*mins[j]);
+ y[i].scales[j] |= (l << 4);
+ }
+ y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale);
+ } else {
+ y[i].dmin = GGML_FP32_TO_FP16(0.f);
+ }
+ for (int j = 0; j < QK_K/16; ++j) {
+ const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF);
+ if (!d) continue;
+ const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4);
+ for (int ii = 0; ii < 16; ++ii) {
+ int l = nearest_int((x[16*j + ii] + dm)/d);
+ l = MAX(0, MIN(3, l));
+ L[16*j + ii] = l;
+ }
+ }
+
+#if QK_K == 256
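+        // pack four 2-bit quants per byte: within each 128-quant chunk, quants l, l+32, l+64
+        // and l+96 share one output byte at bit offsets 0, 2, 4 and 6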
+ for (int j = 0; j < QK_K; j += 128) {
+ for (int l = 0; l < 32; ++l) {
+ y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
+ }
+ }
+#else
+ for (int l = 0; l < 16; ++l) {
+ y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
+ }
+#endif
+
+ x += QK_K;
+
+ }
+}
+
+void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const float min = GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * q = x[i].qs;
+
+#if QK_K == 256
+ int is = 0;
+ float dl, ml;
+ for (int n = 0; n < QK_K; n += 128) {
+ int shift = 0;
+ for (int j = 0; j < 4; ++j) {
+
+ uint8_t sc = x[i].scales[is++];
+ dl = d * (sc & 0xF); ml = min * (sc >> 4);
+ for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml;
+
+ sc = x[i].scales[is++];
+ dl = d * (sc & 0xF); ml = min * (sc >> 4);
+ for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml;
+
+ shift += 2;
+ }
+ q += 32;
+ }
+#else
+ float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4);
+ float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4);
+ float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4);
+ float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4);
+ for (int l = 0; l < 16; ++l) {
+ y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1;
+ y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2;
+ y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3;
+ y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4;
+ }
+ y += QK_K;
+#endif
+ }
+}
+
+void quantize_row_q2_K(const float * restrict x, void * restrict vy, int k) {
+ quantize_row_q2_K_reference(x, vy, k);
+}
+
+size_t ggml_quantize_q2_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
+ (void)hist; // TODO: collect histograms
+
+ for (int j = 0; j < n; j += k) {
+ block_q2_K * restrict y = (block_q2_K *)dst + j/QK_K;
+ quantize_row_q2_K_reference(src + j, y, k);
+ }
+ return (n/QK_K*sizeof(block_q2_K));
+}
+
+//========================= 3-bit (de)-quantization
+
+void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ int8_t L[QK_K];
+ float scales[QK_K / 16];
+
+ for (int i = 0; i < nb; i++) {
+
+ float max_scale = 0;
+ float amax = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true);
+ float scale = fabsf(scales[j]);
+ if (scale > amax) {
+ amax = scale; max_scale = scales[j];
+ }
+ }
+
+#if QK_K == 256
+ memset(y[i].scales, 0, 12);
+ if (max_scale) {
+ float iscale = -32.f/max_scale;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int8_t l = nearest_int(iscale*scales[j]);
+ l = MAX(-32, MIN(31, l)) + 32;
+ if (j < 8) {
+ y[i].scales[j] = l & 0xF;
+ } else {
+ y[i].scales[j-8] |= ((l & 0xF) << 4);
+ }
+ l >>= 4;
+ y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
+ }
+ y[i].d = GGML_FP32_TO_FP16(1/iscale);
+ } else {
+ y[i].d = GGML_FP32_TO_FP16(0.f);
+ }
+
+ int8_t sc;
+ for (int j = 0; j < QK_K/16; ++j) {
+ sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
+ sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
+ float d = GGML_FP16_TO_FP32(y[i].d) * sc;
+ if (!d) {
+ continue;
+ }
+ for (int ii = 0; ii < 16; ++ii) {
+ int l = nearest_int(x[16*j + ii]/d);
+ l = MAX(-4, MIN(3, l));
+ L[16*j + ii] = l + 4;
+ }
+ }
+#else
+ if (max_scale) {
+ float iscale = -8.f/max_scale;
+ for (int j = 0; j < QK_K/16; j+=2) {
+ int l1 = nearest_int(iscale*scales[j]);
+ l1 = 8 + MAX(-8, MIN(7, l1));
+ int l2 = nearest_int(iscale*scales[j+1]);
+ l2 = 8 + MAX(-8, MIN(7, l2));
+ y[i].scales[j/2] = l1 | (l2 << 4);
+ }
+ y[i].d = GGML_FP32_TO_FP16(1/iscale);
+ } else {
+ for (int j = 0; j < QK_K/16; j+=2) {
+ y[i].scales[j/2] = 0;
+ }
+ y[i].d = GGML_FP32_TO_FP16(0.f);
+ }
+ for (int j = 0; j < QK_K/16; ++j) {
+ int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4;
+ float d = GGML_FP16_TO_FP32(y[i].d) * (s - 8);
+ if (!d) {
+ continue;
+ }
+ for (int ii = 0; ii < 16; ++ii) {
+ int l = nearest_int(x[16*j + ii]/d);
+ l = MAX(-4, MIN(3, l));
+ L[16*j + ii] = l + 4;
+ }
+ }
+#endif
+
+ memset(y[i].hmask, 0, QK_K/8);
+        // We put the high bit of the first 8 quants into bit 0, the next 8 into bit 1, etc.
+ int m = 0;
+ uint8_t hm = 1;
+ for (int j = 0; j < QK_K; ++j) {
+ if (L[j] > 3) {
+ y[i].hmask[m] |= hm;
+ L[j] -= 4;
+ }
+ if (++m == QK_K/8) {
+ m = 0; hm <<= 1;
+ }
+ }
+#if QK_K == 256
+ for (int j = 0; j < QK_K; j += 128) {
+ for (int l = 0; l < 32; ++l) {
+ y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
+ }
+ }
+#else
+ for (int l = 0; l < 16; ++l) {
+ y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
+ }
+#endif
+
+ x += QK_K;
+ }
+}
+
+#if QK_K == 256
+void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ const uint32_t kmask1 = 0x03030303;
+ const uint32_t kmask2 = 0x0f0f0f0f;
+
+ uint32_t aux[4];
+ const int8_t * scales = (const int8_t*)aux;
+
+ for (int i = 0; i < nb; i++) {
+
+ const float d_all = GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q = x[i].qs;
+ const uint8_t * restrict hm = x[i].hmask;
+ uint8_t m = 1;
+
+ memcpy(aux, x[i].scales, 12);
+ uint32_t tmp = aux[2];
+ aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
+ aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
+ aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
+ aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
+
+ int is = 0;
+ float dl;
+ for (int n = 0; n < QK_K; n += 128) {
+ int shift = 0;
+ for (int j = 0; j < 4; ++j) {
+
+ dl = d_all * (scales[is++] - 32);
+ for (int l = 0; l < 16; ++l) {
+ *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4));
+ }
+
+ dl = d_all * (scales[is++] - 32);
+ for (int l = 0; l < 16; ++l) {
+ *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4));
+ }
+
+ shift += 2;
+ m <<= 1;
+ }
+ q += 32;
+ }
+
+ }
+}
+#else
+void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ assert(QK_K == 64);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ const float d_all = GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q = x[i].qs;
+ const uint8_t * restrict hm = x[i].hmask;
+
+ const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8);
+ const float d2 = d_all * ((x[i].scales[0] >> 4) - 8);
+ const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8);
+ const float d4 = d_all * ((x[i].scales[1] >> 4) - 8);
+
+ for (int l=0; l<8; ++l) {
+ uint8_t h = hm[l];
+ y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4));
+ y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4));
+ y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4));
+ y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4));
+ y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4));
+ y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4));
+ y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4));
+ y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4));
+ }
+ y += QK_K;
+ }
+}
+#endif
+
+void quantize_row_q3_K(const float * restrict x, void * restrict vy, int k) {
+ quantize_row_q3_K_reference(x, vy, k);
+}
+
+size_t ggml_quantize_q3_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
+ (void)hist; // TODO: collect histograms
+
+ for (int j = 0; j < n; j += k) {
+ block_q3_K * restrict y = (block_q3_K *)dst + j/QK_K;
+ quantize_row_q3_K_reference(src + j, y, k);
+ }
+ return (n/QK_K*sizeof(block_q3_K));
+}
+
+// ====================== 4-bit (de)-quantization
+
+void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ uint8_t L[QK_K];
+ uint8_t Laux[32];
+ float weights[32];
+ float mins[QK_K/32];
+ float scales[QK_K/32];
+
+ for (int i = 0; i < nb; i++) {
+
+        float max_scale = 0; // as we are subtracting the min, scales are always positive
+ float max_min = 0;
+ for (int j = 0; j < QK_K/32; ++j) {
+ //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
+ float sum_x2 = 0;
+ for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
+ float av_x = sqrtf(sum_x2/32);
+ for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
+ scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
+ float scale = scales[j];
+ if (scale > max_scale) {
+ max_scale = scale;
+ }
+ float min = mins[j];
+ if (min > max_min) {
+ max_min = min;
+ }
+ }
+
+#if QK_K == 256
+ float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
+ float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
+ for (int j = 0; j < QK_K/32; ++j) {
+ uint8_t ls = nearest_int(inv_scale*scales[j]);
+ uint8_t lm = nearest_int(inv_min*mins[j]);
+ ls = MIN(63, ls);
+ lm = MIN(63, lm);
+ if (j < 4) {
+ y[i].scales[j] = ls;
+ y[i].scales[j+4] = lm;
+ } else {
+ y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
+ y[i].scales[j-4] |= ((ls >> 4) << 6);
+ y[i].scales[j-0] |= ((lm >> 4) << 6);
+ }
+ }
+ y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
+ y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
+
+ uint8_t sc, m;
+ for (int j = 0; j < QK_K/32; ++j) {
+ get_scale_min_k4(j, y[i].scales, &sc, &m);
+ const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
+ if (!d) continue;
+ const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
+ for (int ii = 0; ii < 32; ++ii) {
+ int l = nearest_int((x[32*j + ii] + dm)/d);
+ l = MAX(0, MIN(15, l));
+ L[32*j + ii] = l;
+ }
+ }
+#else
+ const float s_factor = 15.f;
+ float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f;
+ float inv_min = max_min > 0 ? s_factor/max_min : 0.f;
+ int d1 = nearest_int(inv_scale*scales[0]);
+ int m1 = nearest_int(inv_min*mins[0]);
+ int d2 = nearest_int(inv_scale*scales[1]);
+ int m2 = nearest_int(inv_min*mins[1]);
+ y[i].scales[0] = d1 | (m1 << 4);
+ y[i].scales[1] = d2 | (m2 << 4);
+ y[i].d[0] = GGML_FP32_TO_FP16(max_scale/s_factor);
+ y[i].d[1] = GGML_FP32_TO_FP16(max_min/s_factor);
+
+ float sumlx = 0;
+ int suml2 = 0;
+ for (int j = 0; j < QK_K/32; ++j) {
+ const uint8_t sd = y[i].scales[j] & 0xF;
+ const uint8_t sm = y[i].scales[j] >> 4;
+ const float d = GGML_FP16_TO_FP32(y[i].d[0]) * sd;
+ if (!d) continue;
+ const float m = GGML_FP16_TO_FP32(y[i].d[1]) * sm;
+ for (int ii = 0; ii < 32; ++ii) {
+ int l = nearest_int((x[32*j + ii] + m)/d);
+ l = MAX(0, MIN(15, l));
+ L[32*j + ii] = l;
+ sumlx += (x[32*j + ii] + m)*l*sd;
+ suml2 += l*l*sd*sd;
+ }
+ }
+ if (suml2) {
+ y[i].d[0] = GGML_FP32_TO_FP16(sumlx/suml2);
+ }
+#endif
+ uint8_t * q = y[i].qs;
+ for (int j = 0; j < QK_K; j += 64) {
+ for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
+ q += 32;
+ }
+
+ x += QK_K;
+
+ }
+}
+
+void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ const uint8_t * q = x[i].qs;
+
+#if QK_K == 256
+
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const float min = GGML_FP16_TO_FP32(x[i].dmin);
+
+ int is = 0;
+ uint8_t sc, m;
+ for (int j = 0; j < QK_K; j += 64) {
+ get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
+ const float d1 = d * sc; const float m1 = min * m;
+ get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
+ const float d2 = d * sc; const float m2 = min * m;
+ for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1;
+ for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2;
+ q += 32; is += 2;
+ }
+#else
+ const float dall = GGML_FP16_TO_FP32(x[i].d[0]);
+ const float mall = GGML_FP16_TO_FP32(x[i].d[1]);
+ const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4);
+ const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4);
+ for (int l = 0; l < 32; ++l) {
+ y[l+ 0] = d1 * (q[l] & 0xF) - m1;
+ y[l+32] = d2 * (q[l] >> 4) - m2;
+ }
+ y += QK_K;
+#endif
+
+ }
+}
+
+void quantize_row_q4_K(const float * restrict x, void * restrict vy, int k) {
+ assert(k % QK_K == 0);
+ block_q4_K * restrict y = vy;
+ quantize_row_q4_K_reference(x, y, k);
+}
+
+size_t ggml_quantize_q4_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
+ assert(k % QK_K == 0);
+ (void)hist; // TODO: collect histograms
+
+ for (int j = 0; j < n; j += k) {
+ block_q4_K * restrict y = (block_q4_K *)dst + j/QK_K;
+ quantize_row_q4_K_reference(src + j, y, k);
+ }
+ return (n/QK_K*sizeof(block_q4_K));
+}
+
+// ====================== 5-bit (de)-quantization
+
+void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+#if QK_K == 256
+ uint8_t L[QK_K];
+ float mins[QK_K/32];
+ float scales[QK_K/32];
+ float weights[32];
+ uint8_t Laux[32];
+#else
+ int8_t L[QK_K];
+ float scales[QK_K/16];
+#endif
+
+ for (int i = 0; i < nb; i++) {
+
+#if QK_K == 256
+
+        float max_scale = 0; // as we are subtracting the min, scales are always positive
+ float max_min = 0;
+ for (int j = 0; j < QK_K/32; ++j) {
+ //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
+ float sum_x2 = 0;
+ for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
+ float av_x = sqrtf(sum_x2/32);
+ for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
+ scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false);
+ float scale = scales[j];
+ if (scale > max_scale) {
+ max_scale = scale;
+ }
+ float min = mins[j];
+ if (min > max_min) {
+ max_min = min;
+ }
+ }
+
+ float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
+ float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
+ for (int j = 0; j < QK_K/32; ++j) {
+ uint8_t ls = nearest_int(inv_scale*scales[j]);
+ uint8_t lm = nearest_int(inv_min*mins[j]);
+ ls = MIN(63, ls);
+ lm = MIN(63, lm);
+ if (j < 4) {
+ y[i].scales[j] = ls;
+ y[i].scales[j+4] = lm;
+ } else {
+ y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
+ y[i].scales[j-4] |= ((ls >> 4) << 6);
+ y[i].scales[j-0] |= ((lm >> 4) << 6);
+ }
+ }
+ y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
+ y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
+
+ uint8_t sc, m;
+ for (int j = 0; j < QK_K/32; ++j) {
+ get_scale_min_k4(j, y[i].scales, &sc, &m);
+ const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
+ if (!d) continue;
+ const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
+ for (int ii = 0; ii < 32; ++ii) {
+ int l = nearest_int((x[32*j + ii] + dm)/d);
+ l = MAX(0, MIN(31, l));
+ L[32*j + ii] = l;
+ }
+ }
+
+ uint8_t * restrict qh = y[i].qh;
+ uint8_t * restrict ql = y[i].qs;
+ memset(qh, 0, QK_K/8);
+
+ uint8_t m1 = 1, m2 = 2;
+ for (int n = 0; n < QK_K; n += 64) {
+ for (int j = 0; j < 32; ++j) {
+ int l1 = L[n + j];
+ if (l1 > 15) {
+ l1 -= 16; qh[j] |= m1;
+ }
+ int l2 = L[n + j + 32];
+ if (l2 > 15) {
+ l2 -= 16; qh[j] |= m2;
+ }
+ ql[j] = l1 | (l2 << 4);
+ }
+ m1 <<= 2; m2 <<= 2;
+ ql += 32;
+ }
+#else
+ float max_scale = 0, amax = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1);
+ float abs_scale = fabsf(scales[j]);
+ if (abs_scale > amax) {
+ amax = abs_scale;
+ max_scale = scales[j];
+ }
+ }
+
+ float iscale = -128.f/max_scale;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int l = nearest_int(iscale*scales[j]);
+ y[i].scales[j] = MAX(-128, MIN(127, l));
+ }
+ y[i].d = GGML_FP32_TO_FP16(1/iscale);
+
+ for (int j = 0; j < QK_K/16; ++j) {
+ const float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
+ if (!d) continue;
+ for (int ii = 0; ii < 16; ++ii) {
+ int l = nearest_int(x[16*j + ii]/d);
+ l = MAX(-16, MIN(15, l));
+ L[16*j + ii] = l + 16;
+ }
+ }
+
+ uint8_t * restrict qh = y[i].qh;
+ uint8_t * restrict ql = y[i].qs;
+ memset(qh, 0, QK_K/8);
+
+ for (int j = 0; j < 32; ++j) {
+ int jm = j%8;
+ int is = j/8;
+ int l1 = L[j];
+ if (l1 > 15) {
+ l1 -= 16; qh[jm] |= (1 << is);
+ }
+ int l2 = L[j + 32];
+ if (l2 > 15) {
+ l2 -= 16; qh[jm] |= (1 << (4 + is));
+ }
+ ql[j] = l1 | (l2 << 4);
+ }
+#endif
+
+ x += QK_K;
+
+ }
+}
+
+void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ const uint8_t * ql = x[i].qs;
+ const uint8_t * qh = x[i].qh;
+
+#if QK_K == 256
+
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const float min = GGML_FP16_TO_FP32(x[i].dmin);
+
+ int is = 0;
+ uint8_t sc, m;
+ uint8_t u1 = 1, u2 = 2;
+ for (int j = 0; j < QK_K; j += 64) {
+ get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
+ const float d1 = d * sc; const float m1 = min * m;
+ get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
+ const float d2 = d * sc; const float m2 = min * m;
+ for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1;
+ for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2;
+ ql += 32; is += 2;
+ u1 <<= 2; u2 <<= 2;
+ }
+#else
+ float d = GGML_FP16_TO_FP32(x[i].d);
+ const int8_t * restrict s = x[i].scales;
+ for (int l = 0; l < 8; ++l) {
+ y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16));
+ y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16));
+ y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16));
+ y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16));
+ y[l+32] = d * s[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16));
+ y[l+40] = d * s[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16));
+ y[l+48] = d * s[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16));
+ y[l+56] = d * s[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 0 : 16));
+ }
+ y += QK_K;
+#endif
+ }
+}
+
+void quantize_row_q5_K(const float * restrict x, void * restrict vy, int k) {
+ assert(k % QK_K == 0);
+ block_q5_K * restrict y = vy;
+ quantize_row_q5_K_reference(x, y, k);
+}
+
+size_t ggml_quantize_q5_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
+ assert(k % QK_K == 0);
+ (void)hist; // TODO: collect histograms
+
+ for (int j = 0; j < n; j += k) {
+ block_q5_K * restrict y = (block_q5_K *)dst + j/QK_K;
+ quantize_row_q5_K_reference(src + j, y, k);
+ }
+ return (n/QK_K*sizeof(block_q5_K));
+}
+
+// ====================== 6-bit (de)-quantization
+
+void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ int8_t L[QK_K];
+ float scales[QK_K/16];
+
+ for (int i = 0; i < nb; i++) {
+
+ float max_scale = 0;
+ float max_abs_scale = 0;
+
+ for (int ib = 0; ib < QK_K/16; ++ib) {
+
+ const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1);
+ scales[ib] = scale;
+
+ const float abs_scale = fabsf(scale);
+ if (abs_scale > max_abs_scale) {
+ max_abs_scale = abs_scale;
+ max_scale = scale;
+ }
+
+ }
+
+ if (!max_abs_scale) {
+ memset(&y[i], 0, sizeof(block_q6_K));
+ y[i].d = GGML_FP32_TO_FP16(0.f);
+ x += QK_K;
+ continue;
+ }
+
+ float iscale = -128.f/max_scale;
+ y[i].d = GGML_FP32_TO_FP16(1/iscale);
+ for (int ib = 0; ib < QK_K/16; ++ib) {
+ y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
+ }
+
+ for (int j = 0; j < QK_K/16; ++j) {
+ float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
+ if (!d) {
+ continue;
+ }
+ for (int ii = 0; ii < 16; ++ii) {
+ int l = nearest_int(x[16*j + ii]/d);
+ l = MAX(-32, MIN(31, l));
+ L[16*j + ii] = l + 32;
+ }
+ }
+
+ uint8_t * restrict ql = y[i].ql;
+ uint8_t * restrict qh = y[i].qh;
+#if QK_K == 256
+ for (int j = 0; j < QK_K; j += 128) {
+ for (int l = 0; l < 32; ++l) {
+ const uint8_t q1 = L[j + l + 0] & 0xF;
+ const uint8_t q2 = L[j + l + 32] & 0xF;
+ const uint8_t q3 = L[j + l + 64] & 0xF;
+ const uint8_t q4 = L[j + l + 96] & 0xF;
+ ql[l+ 0] = q1 | (q3 << 4);
+ ql[l+32] = q2 | (q4 << 4);
+ qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
+ }
+ ql += 64;
+ qh += 32;
+ }
+#else
+ for (int l = 0; l < 32; ++l) {
+ const uint8_t q1 = L[l + 0] & 0xF;
+ const uint8_t q2 = L[l + 32] & 0xF;
+ ql[l] = q1 | (q2 << 4);
+ }
+ for (int l = 0; l < 16; ++l) {
+ qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6);
+ }
+#endif
+
+ x += QK_K;
+
+ }
+}
+
+void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict ql = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict sc = x[i].scales;
+
+#if QK_K == 256
+ for (int n = 0; n < QK_K; n += 128) {
+ for (int l = 0; l < 32; ++l) {
+ int is = l/16;
+ const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
+ const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
+ const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
+ const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
+ y[l + 0] = d * sc[is + 0] * q1;
+ y[l + 32] = d * sc[is + 2] * q2;
+ y[l + 64] = d * sc[is + 4] * q3;
+ y[l + 96] = d * sc[is + 6] * q4;
+ }
+ y += 128;
+ ql += 64;
+ qh += 32;
+ sc += 8;
+ }
+#else
+ for (int l = 0; l < 16; ++l) {
+ const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
+ const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
+ const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
+ const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
+ y[l+ 0] = d * sc[0] * q1;
+ y[l+16] = d * sc[1] * q2;
+ y[l+32] = d * sc[2] * q3;
+ y[l+48] = d * sc[3] * q4;
+ }
+ y += 64;
+#endif
+
+ }
+}
+
+void quantize_row_q6_K(const float * restrict x, void * restrict vy, int k) {
+ assert(k % QK_K == 0);
+ block_q6_K * restrict y = vy;
+ quantize_row_q6_K_reference(x, y, k);
+}
+
+size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist) {
+ assert(k % QK_K == 0);
+ (void)hist; // TODO: collect histograms
+
+ for (int j = 0; j < n; j += k) {
+ block_q6_K * restrict y = (block_q6_K *)dst + j/QK_K;
+ quantize_row_q6_K_reference(src + j, y, k);
+ }
+ return (n/QK_K*sizeof(block_q6_K));
+}
+
+//===================================== Q8_K ==============================================
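+// Q8_K is the 8-bit format used on the activation ("y") side of the K-quant
+// dot products: one fp32 scale d per QK_K values, plus the sum of each group
+// of 16 quants in bsums[], which lets the dot product kernels fold in the
+// block minima cheaply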
+
+void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ float max = 0;
+ float amax = 0;
+ for (int j = 0; j < QK_K; ++j) {
+ float ax = fabsf(x[j]);
+ if (ax > amax) {
+ amax = ax; max = x[j];
+ }
+ }
+ if (!amax) {
+ y[i].d = 0;
+ memset(y[i].qs, 0, QK_K);
+ x += QK_K;
+ continue;
+ }
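+        // scale so that the value with the largest magnitude maps to -128;
+        // the positive side is clamped to 127 below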
+ const float iscale = -128.f/max;
+ for (int j = 0; j < QK_K; ++j) {
+ int v = nearest_int(iscale*x[j]);
+ y[i].qs[j] = MIN(127, v);
+ }
+ for (int j = 0; j < QK_K/16; ++j) {
+ int sum = 0;
+ for (int ii = 0; ii < 16; ++ii) {
+ sum += y[i].qs[j*16 + ii];
+ }
+ y[i].bsums[j] = sum;
+ }
+ y[i].d = 1/iscale;
+ x += QK_K;
+ }
+}
+
+void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+ for (int j = 0; j < QK_K; ++j) {
+ *y++ = x[i].d * x[i].qs[j];
+ }
+ }
+}
+
+void quantize_row_q8_K(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q8_K_reference(x, y, k);
+}
+
+//===================================== Dot products =================================
+
+//
+// Helper functions
+//
+#if __AVX__ || __AVX2__ || __AVX512F__
+
+// shuffles to pick the required scales in dot products
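+// each row of these tables is a pshufb pattern: the 256-bit variants
+// replicate one 16-bit scale per 128-bit lane, the 128-bit variant broadcasts
+// single scale bytes, so a sub-block's scale reaches every lane in one shuffle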
+static inline __m256i get_scale_shuffle_q3k(int i) {
+ static const uint8_t k_shuffle[128] = {
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
+ 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
+ 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
+ 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,
+ };
+ return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
+}
+static inline __m256i get_scale_shuffle_k4(int i) {
+ static const uint8_t k_shuffle[256] = {
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
+ 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
+ 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
+ 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9,
+ 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
+ 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,
+ 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15
+ };
+ return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
+}
+static inline __m128i get_scale_shuffle(int i) {
+ static const uint8_t k_shuffle[128] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+ 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11,
+ 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13,
+ 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15
+ };
+ return _mm_loadu_si128((const __m128i*)k_shuffle + i);
+}
+#endif
+
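+// each ggml_vec_dot_* kernel below computes the same result as its scalar
+// fallback at the end of the function:
+//   s = sum over blocks of d_x*d_y * (integer dot product of the quants)
+// (plus a per-block min correction for the _1 and K formats), just vectorized
+// per architecture
+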
+void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+
+ const block_q4_0 * restrict x = vx;
+ const block_q8_0 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ for (int i = 0; i < nb; i += 2) {
+ const block_q4_0 * restrict x0 = &x[i + 0];
+ const block_q4_0 * restrict x1 = &x[i + 1];
+ const block_q8_0 * restrict y0 = &y[i + 0];
+ const block_q8_0 * restrict y1 = &y[i + 1];
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+ const int8x16_t s8b = vdupq_n_s8(0x8);
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // sub 8
+ const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
+ const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
+ const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
+ const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ // dot product into int32x4_t
+ const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
+ const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+#else
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h));
+
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h));
+
+ const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
+ const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
+ const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
+ const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+#endif
+ }
+
+ *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+#elif defined(__AVX2__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ // Main loop
+ for (int i = 0; i < nb; ++i) {
+ /* Compute combined scale for the block */
+ const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
+
+ __m256i bx = bytes_from_nibbles_32(x[i].qs);
+
+ // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
+ const __m256i off = _mm256_set1_epi8( 8 );
+ bx = _mm256_sub_epi8( bx, off );
+
+ __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_i8_pairs_float(bx, by);
+
+ /* Multiply q with scale and accumulate */
+ acc = _mm256_fmadd_ps( d, q, acc );
+ }
+
+ *s = hsum_float_8(acc);
+#elif defined(__AVX__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ // Main loop
+ for (int i = 0; i < nb; ++i) {
+ // Compute combined scale for the block
+ const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
+
+ const __m128i lowMask = _mm_set1_epi8(0xF);
+ const __m128i off = _mm_set1_epi8(8);
+
+ const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
+
+ __m128i bx = _mm_and_si128(lowMask, tmp);
+ __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
+ bx = _mm_sub_epi8(bx, off);
+ const __m128i i32_0 = mul_sum_i8_pairs(bx, by);
+
+ bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
+ by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
+ bx = _mm_sub_epi8(bx, off);
+ const __m128i i32_1 = mul_sum_i8_pairs(bx, by);
+
+ // Convert int32_t to float
+ __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));
+
+ // Apply the scale, and accumulate
+ acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
+ }
+
+ *s = hsum_float_8(acc);
+#elif defined(__SSSE3__)
+ // set constants
+ const __m128i lowMask = _mm_set1_epi8(0xF);
+ const __m128i off = _mm_set1_epi8(8);
+
+ // Initialize accumulator with zeros
+ __m128 acc_0 = _mm_setzero_ps();
+ __m128 acc_1 = _mm_setzero_ps();
+ __m128 acc_2 = _mm_setzero_ps();
+ __m128 acc_3 = _mm_setzero_ps();
+
+ // First round without accumulation
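+    // (peeling the first two blocks initializes the accumulators with a plain
+    // multiply instead of a multiply-add with zero)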
+ {
+ _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for the block 0 and 1
+ const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );
+
+ const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
+
+ __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
+ __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
+ bx_0 = _mm_sub_epi8(bx_0, off);
+ const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
+
+ __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
+ __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
+ bx_1 = _mm_sub_epi8(bx_1, off);
+ const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
+
+ _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for the block 2 and 3
+ const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );
+
+ const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
+
+ __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
+ __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
+ bx_2 = _mm_sub_epi8(bx_2, off);
+ const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
+
+ __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
+ __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
+ bx_3 = _mm_sub_epi8(bx_3, off);
+ const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
+
+ // Convert int32_t to float
+ __m128 p0 = _mm_cvtepi32_ps(i32_0);
+ __m128 p1 = _mm_cvtepi32_ps(i32_1);
+ __m128 p2 = _mm_cvtepi32_ps(i32_2);
+ __m128 p3 = _mm_cvtepi32_ps(i32_3);
+
+ // Apply the scale
+ acc_0 = _mm_mul_ps( d_0_1, p0 );
+ acc_1 = _mm_mul_ps( d_0_1, p1 );
+ acc_2 = _mm_mul_ps( d_2_3, p2 );
+ acc_3 = _mm_mul_ps( d_2_3, p3 );
+ }
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ // Main loop
+ for (int i = 2; i < nb; i+=2) {
+ _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for the block 0 and 1
+ const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
+
+ const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
+
+ __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
+ __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
+ bx_0 = _mm_sub_epi8(bx_0, off);
+ const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
+
+ __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
+ __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
+ bx_1 = _mm_sub_epi8(bx_1, off);
+ const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
+
+ _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for the block 2 and 3
+ const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
+
+ const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
+
+ __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
+ __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
+ bx_2 = _mm_sub_epi8(bx_2, off);
+ const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
+
+ __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
+ __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
+ bx_3 = _mm_sub_epi8(bx_3, off);
+ const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
+
+ // Convert int32_t to float
+ __m128 p0 = _mm_cvtepi32_ps(i32_0);
+ __m128 p1 = _mm_cvtepi32_ps(i32_1);
+ __m128 p2 = _mm_cvtepi32_ps(i32_2);
+ __m128 p3 = _mm_cvtepi32_ps(i32_3);
+
+ // Apply the scale
+ __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
+ __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
+ __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
+ __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
+
+        // Accumulate
+ acc_0 = _mm_add_ps(p0_d, acc_0);
+ acc_1 = _mm_add_ps(p1_d, acc_1);
+ acc_2 = _mm_add_ps(p2_d, acc_2);
+ acc_3 = _mm_add_ps(p3_d, acc_3);
+ }
+
+ *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
+#elif defined(__riscv_v_intrinsic)
+ float sumf = 0.0;
+
+ size_t vl = __riscv_vsetvl_e8m1(qk/2);
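+    // vl = qk/2 = 16: each iteration handles the 16 bytes holding one block's
+    // 32 nibbles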
+
+ for (int i = 0; i < nb; i++) {
+ // load elements
+ vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
+
+ vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+ vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
+
+ // mask and store lower part of x, and then upper part
+ vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+ vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
+
+ vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+ vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
+
+ // subtract offset
+ vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl);
+ vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl);
+
+ vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+ vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
+
+ vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
+
+ vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+ vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
+
+ int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
+
+ sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
+ }
+
+ *s = sumf;
+#else
+ // scalar
+ float sumf = 0.0;
+
+ for (int i = 0; i < nb; i++) {
+ int sumi = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const int v0 = (x[i].qs[j] & 0x0F) - 8;
+ const int v1 = (x[i].qs[j] >> 4) - 8;
+
+ sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
+ }
+
+ sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
+ }
+
+ *s = sumf;
+#endif
+}
+
+void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = QK8_1;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+
+ const block_q4_1 * restrict x = vx;
+ const block_q8_1 * restrict y = vy;
+
+ // TODO: add WASM SIMD
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ float summs = 0;
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ for (int i = 0; i < nb; i += 2) {
+ const block_q4_1 * restrict x0 = &x[i + 0];
+ const block_q4_1 * restrict x1 = &x[i + 1];
+ const block_q8_1 * restrict y0 = &y[i + 0];
+ const block_q8_1 * restrict y1 = &y[i + 1];
+
+ summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ // dot product into int32x4_t
+ const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
+ const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
+#else
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h));
+
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1h));
+
+ const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
+ const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
+ const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
+ const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
+#endif
+ }
+
+ *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
+#elif defined(__AVX2__) || defined(__AVX__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0;
+
+ // Main loop
+ for (int i = 0; i < nb; ++i) {
+ const float d0 = GGML_FP16_TO_FP32(x[i].d);
+ const float d1 = y[i].d;
+
+ summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
+
+ const __m256 d0v = _mm256_set1_ps( d0 );
+ const __m256 d1v = _mm256_set1_ps( d1 );
+
+ // Compute combined scales
+ const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );
+
+ // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
+ const __m256i bx = bytes_from_nibbles_32(x[i].qs);
+ const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs );
+
+ const __m256 xy = mul_sum_us8_pairs_float(bx, by);
+
+ // Accumulate d0*d1*x*y
+#if defined(__AVX2__)
+ acc = _mm256_fmadd_ps( d0d1, xy, acc );
+#else
+ acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
+#endif
+ }
+
+ *s = hsum_float_8(acc) + summs;
+#elif defined(__riscv_v_intrinsic)
+ float sumf = 0.0;
+
+ size_t vl = __riscv_vsetvl_e8m1(qk/2);
+
+ for (int i = 0; i < nb; i++) {
+ // load elements
+ vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
+
+ vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+ vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
+
+ // mask and store lower part of x, and then upper part
+ vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+ vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
+
+ vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+ vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
+
+ vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+ vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
+
+ vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
+
+ vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+ vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
+
+ int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
+ }
+
+ *s = sumf;
+#else
+ // scalar
+ float sumf = 0.0;
+
+ for (int i = 0; i < nb; i++) {
+ int sumi = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const int v0 = (x[i].qs[j] & 0x0F);
+ const int v1 = (x[i].qs[j] >> 4);
+
+ sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
+ }
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
+ }
+
+ *s = sumf;
+#endif
+}
+
+void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+ assert(qk == QK5_0);
+
+ const block_q5_0 * restrict x = vx;
+ const block_q8_0 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ uint32_t qh0;
+ uint32_t qh1;
+
+ uint64_t tmp0[4];
+ uint64_t tmp1[4];
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ for (int i = 0; i < nb; i += 2) {
+ const block_q5_0 * restrict x0 = &x[i];
+ const block_q5_0 * restrict x1 = &x[i + 1];
+ const block_q8_0 * restrict y0 = &y[i];
+ const block_q8_0 * restrict y1 = &y[i + 1];
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+ // extract the 5th bit via lookup table ((!b) << 4)
+ memcpy(&qh0, x0->qh, sizeof(qh0));
+ memcpy(&qh1, x1->qh, sizeof(qh1));
+
+ tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
+ tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
+ tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
+ tmp0[3] = table_b2b_1[(qh0 >> 24) ];
+
+ tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
+ tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
+ tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
+ tmp1[3] = table_b2b_1[(qh1 >> 24) ];
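+        // tmp0/tmp1 now hold one 0x00/0x10 byte per quant (0x10 where the
+        // high bit is clear); subtracting them below both inserts the high
+        // bit and applies the -16 offset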
+
+ const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
+ const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
+ const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
+ const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
+ const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
+ const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
+ const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
+ const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
+ vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
+ vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+#else
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
+
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
+
+ const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
+ const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
+ const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
+ const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+#endif
+ }
+
+ *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+#elif defined(__wasm_simd128__)
+ v128_t sumv = wasm_f32x4_splat(0.0f);
+
+ uint32_t qh;
+ uint64_t tmp[4];
+
+ // TODO: check if unrolling this is better
+ for (int i = 0; i < nb; ++i) {
+ const block_q5_0 * restrict x0 = &x[i];
+ const block_q8_0 * restrict y0 = &y[i];
+
+ const v128_t m4b = wasm_i8x16_splat(0x0F);
+
+ // extract the 5th bit
+ memcpy(&qh, x0->qh, sizeof(qh));
+
+ tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
+ tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
+ tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
+ tmp[3] = table_b2b_1[(qh >> 24) ];
+
+ const v128_t qhl = wasm_v128_load(tmp + 0);
+ const v128_t qhh = wasm_v128_load(tmp + 2);
+
+ const v128_t v0 = wasm_v128_load(x0->qs);
+
+ // 4-bit -> 8-bit
+ const v128_t v0l = wasm_v128_and (v0, m4b);
+ const v128_t v0h = wasm_u8x16_shr(v0, 4);
+
+ // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
+ const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
+ const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);
+
+ // load y
+ const v128_t v1l = wasm_v128_load(y0->qs);
+ const v128_t v1h = wasm_v128_load(y0->qs + 16);
+
+ // int8x16 -> int16x8
+ const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
+ const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
+ const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
+ const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
+
+ const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
+ const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
+ const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
+ const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
+
+ // dot product
+ sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
+ wasm_i32x4_add(
+ wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
+ wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
+ wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
+ wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
+ wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
+ }
+
+ *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
+ wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
+#elif defined(__AVX2__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ // Main loop
+ for (int i = 0; i < nb; i++) {
+ /* Compute combined scale for the block */
+ const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
+
+ __m256i bx = bytes_from_nibbles_32(x[i].qs);
+ __m256i bxhi = bytes_from_bits_32(x[i].qh);
+ bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
+ bx = _mm256_or_si256(bx, bxhi);
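+        // after the OR, bytes whose high bit was clear have their top nibble
+        // set to 0xF0, the two's-complement encoding of (quant - 16); bytes
+        // whose high bit was set already hold the final value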
+
+ __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_i8_pairs_float(bx, by);
+
+ /* Multiply q with scale and accumulate */
+ acc = _mm256_fmadd_ps(d, q, acc);
+ }
+
+ *s = hsum_float_8(acc);
+#elif defined(__AVX__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+ __m128i mask = _mm_set1_epi8((char)0xF0);
+
+ // Main loop
+ for (int i = 0; i < nb; i++) {
+ /* Compute combined scale for the block */
+ const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
+
+ __m256i bx = bytes_from_nibbles_32(x[i].qs);
+ const __m256i bxhi = bytes_from_bits_32(x[i].qh);
+ __m128i bxhil = _mm256_castsi256_si128(bxhi);
+ __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
+ bxhil = _mm_andnot_si128(bxhil, mask);
+ bxhih = _mm_andnot_si128(bxhih, mask);
+ __m128i bxl = _mm256_castsi256_si128(bx);
+ __m128i bxh = _mm256_extractf128_si256(bx, 1);
+ bxl = _mm_or_si128(bxl, bxhil);
+ bxh = _mm_or_si128(bxh, bxhih);
+ bx = MM256_SET_M128I(bxh, bxl);
+
+ const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_i8_pairs_float(bx, by);
+
+ /* Multiply q with scale and accumulate */
+ acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
+ }
+
+ *s = hsum_float_8(acc);
+#elif defined(__riscv_v_intrinsic)
+ float sumf = 0.0;
+
+ uint32_t qh;
+
+ size_t vl = __riscv_vsetvl_e8m1(qk/2);
+
+    // These temporary registers are for masking and shift operations
+ vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
+ vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);
+
+ vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl);
+ vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
+
+ for (int i = 0; i < nb; i++) {
+ memcpy(&qh, x[i].qh, sizeof(uint32_t));
+
+ // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
+ vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl);
+ vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl);
+ vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
+
+ // ((qh & (1u << (j + 16))) >> (j + 12));
+ vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl);
+ vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl);
+
+ // narrowing
+ vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl);
+ vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
+
+ vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl);
+ vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
+
+ // load
+ vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
+
+ vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+ vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
+
+ vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+ vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
+
+ vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
+ vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
+
+ vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+ vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
+
+ vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl);
+ vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl);
+
+ vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+ vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
+
+ vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
+
+ vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+ vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
+
+ int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
+ }
+
+ *s = sumf;
+#else
+ // scalar
+ float sumf = 0.0;
+
+ for (int i = 0; i < nb; i++) {
+ uint32_t qh;
+ memcpy(&qh, x[i].qh, sizeof(qh));
+
+ int sumi = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
+ const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
+
+ const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
+ const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
+
+ sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
+ }
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
+ }
+
+ *s = sumf;
+#endif
+}
+
+void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = QK8_1;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+ assert(qk == QK5_1);
+
+ const block_q5_1 * restrict x = vx;
+ const block_q8_1 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ float summs0 = 0.0f;
+ float summs1 = 0.0f;
+
+ uint32_t qh0;
+ uint32_t qh1;
+
+ uint64_t tmp0[4];
+ uint64_t tmp1[4];
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ for (int i = 0; i < nb; i += 2) {
+ const block_q5_1 * restrict x0 = &x[i];
+ const block_q5_1 * restrict x1 = &x[i + 1];
+ const block_q8_1 * restrict y0 = &y[i];
+ const block_q8_1 * restrict y1 = &y[i + 1];
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+ summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
+ summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;
+
+ // extract the 5th bit via lookup table ((b) << 4)
+ memcpy(&qh0, x0->qh, sizeof(qh0));
+ memcpy(&qh1, x1->qh, sizeof(qh1));
+
+ tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
+ tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
+ tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
+ tmp0[3] = table_b2b_0[(qh0 >> 24) ];
+
+ tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
+ tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
+ tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
+ tmp1[3] = table_b2b_0[(qh1 >> 24) ];
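+        // tmp0/tmp1 hold one 0x00/0x10 byte per quant (0x10 where the high
+        // bit is set); OR-ing them below inserts the high bit, and since
+        // q5_1 quants are unsigned no offset is subtracted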
+
+ const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
+ const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
+ const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
+ const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // add high bit
+ const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
+ const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
+ const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
+ const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
+ vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
+ vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
+#else
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
+
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
+
+ const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
+ const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
+ const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
+ const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
+#endif
+ }
+
+ *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
+#elif defined(__wasm_simd128__)
+ v128_t sumv = wasm_f32x4_splat(0.0f);
+
+ float summs = 0.0f;
+
+ uint32_t qh;
+ uint64_t tmp[4];
+
+ // TODO: check if unrolling this is better
+ for (int i = 0; i < nb; ++i) {
+ const block_q5_1 * restrict x0 = &x[i];
+ const block_q8_1 * restrict y0 = &y[i];
+
+ summs += GGML_FP16_TO_FP32(x0->m) * y0->s;
+
+ const v128_t m4b = wasm_i8x16_splat(0x0F);
+
+ // extract the 5th bit
+ memcpy(&qh, x0->qh, sizeof(qh));
+
+ tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
+ tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
+ tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
+ tmp[3] = table_b2b_0[(qh >> 24) ];
+
+ const v128_t qhl = wasm_v128_load(tmp + 0);
+ const v128_t qhh = wasm_v128_load(tmp + 2);
+
+ const v128_t v0 = wasm_v128_load(x0->qs);
+
+ // 4-bit -> 8-bit
+ const v128_t v0l = wasm_v128_and (v0, m4b);
+ const v128_t v0h = wasm_u8x16_shr(v0, 4);
+
+ // add high bit
+ const v128_t v0lf = wasm_v128_or(v0l, qhl);
+ const v128_t v0hf = wasm_v128_or(v0h, qhh);
+
+ // load y
+ const v128_t v1l = wasm_v128_load(y0->qs);
+ const v128_t v1h = wasm_v128_load(y0->qs + 16);
+
+ // int8x16 -> int16x8
+ const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
+ const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
+ const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
+ const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
+
+ const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
+ const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
+ const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
+ const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
+
+ // dot product
+ sumv = wasm_f32x4_add(sumv,
+ wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
+ wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
+ wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
+ wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
+ wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
+ wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
+ }
+
+ *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
+ wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
+#elif defined(__AVX2__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0.0f;
+
+ // Main loop
+ for (int i = 0; i < nb; i++) {
+ const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
+
+ summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
+
+ __m256i bx = bytes_from_nibbles_32(x[i].qs);
+ __m256i bxhi = bytes_from_bits_32(x[i].qh);
+ bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
+ bx = _mm256_or_si256(bx, bxhi);
+
+ const __m256 dy = _mm256_set1_ps(y[i].d);
+ const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_us8_pairs_float(bx, by);
+
+ acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
+ }
+
+ *s = hsum_float_8(acc) + summs;
+#elif defined(__AVX__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+ __m128i mask = _mm_set1_epi8(0x10);
+
+ float summs = 0.0f;
+
+ // Main loop
+ for (int i = 0; i < nb; i++) {
+ const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
+
+ summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
+
+ __m256i bx = bytes_from_nibbles_32(x[i].qs);
+ const __m256i bxhi = bytes_from_bits_32(x[i].qh);
+ __m128i bxhil = _mm256_castsi256_si128(bxhi);
+ __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
+ bxhil = _mm_and_si128(bxhil, mask);
+ bxhih = _mm_and_si128(bxhih, mask);
+ __m128i bxl = _mm256_castsi256_si128(bx);
+ __m128i bxh = _mm256_extractf128_si256(bx, 1);
+ bxl = _mm_or_si128(bxl, bxhil);
+ bxh = _mm_or_si128(bxh, bxhih);
+ bx = MM256_SET_M128I(bxh, bxl);
+
+ const __m256 dy = _mm256_set1_ps(y[i].d);
+ const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_us8_pairs_float(bx, by);
+
+ acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
+ }
+
+ *s = hsum_float_8(acc) + summs;
+#elif defined(__riscv_v_intrinsic)
+ float sumf = 0.0;
+
+ uint32_t qh;
+
+ size_t vl = __riscv_vsetvl_e8m1(qk/2);
+
+ // temporary registers for shift operations
+ vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
+ vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
+
+ for (int i = 0; i < nb; i++) {
+ memcpy(&qh, x[i].qh, sizeof(uint32_t));
+
+ // load qh
+ vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl);
+
+ // ((qh >> (j + 0)) << 4) & 0x10;
+ vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl);
+ vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
+ vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl);
+
+ // ((qh >> (j + 12)) ) & 0x10;
+ vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl);
+ vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl);
+
+ // narrowing
+ vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl);
+ vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
+
+ vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl);
+ vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
+
+ // load
+ vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
+
+ vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+ vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
+
+ vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+ vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
+
+ vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
+ vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
+
+ vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+ vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
+
+ vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+ vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
+
+ vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
+
+ vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+ vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
+
+ int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
+ }
+
+ *s = sumf;
+#else
+ // scalar
+ float sumf = 0.0;
+
+ for (int i = 0; i < nb; i++) {
+ uint32_t qh;
+ memcpy(&qh, x[i].qh, sizeof(qh));
+
+ int sumi = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
+
+ const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
+ const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;
+
+ sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
+ }
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
+ }
+
+ *s = sumf;
+#endif
+}
+
+void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+
+ const block_q8_0 * restrict x = vx;
+ const block_q8_0 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ for (int i = 0; i < nb; i += 2) {
+ const block_q8_0 * restrict x0 = &x[i + 0];
+ const block_q8_0 * restrict x1 = &x[i + 1];
+ const block_q8_0 * restrict y0 = &y[i + 0];
+ const block_q8_0 * restrict y1 = &y[i + 1];
+
+ const int8x16_t x0_0 = vld1q_s8(x0->qs);
+ const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
+ const int8x16_t x1_0 = vld1q_s8(x1->qs);
+ const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
+
+ // load y
+ const int8x16_t y0_0 = vld1q_s8(y0->qs);
+ const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
+ const int8x16_t y1_0 = vld1q_s8(y1->qs);
+ const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
+ vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
+ vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+
+#else
+ const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0));
+ const int16x8_t p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0));
+ const int16x8_t p0_2 = vmull_s8(vget_low_s8 (x0_1), vget_low_s8 (y0_1));
+ const int16x8_t p0_3 = vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1));
+
+ const int16x8_t p1_0 = vmull_s8(vget_low_s8 (x1_0), vget_low_s8 (y1_0));
+ const int16x8_t p1_1 = vmull_s8(vget_high_s8(x1_0), vget_high_s8(y1_0));
+ const int16x8_t p1_2 = vmull_s8(vget_low_s8 (x1_1), vget_low_s8 (y1_1));
+ const int16x8_t p1_3 = vmull_s8(vget_high_s8(x1_1), vget_high_s8(y1_1));
+
+ const int32x4_t p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1));
+ const int32x4_t p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3));
+ const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1));
+ const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3));
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+#endif
+ }
+
+ *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+#elif defined(__AVX2__) || defined(__AVX__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ // Main loop
+ for (int i = 0; i < nb; ++i) {
+ // Compute combined scale for the block
+ const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
+ __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs);
+ __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_i8_pairs_float(bx, by);
+
+ // Multiply q with scale and accumulate
+#if defined(__AVX2__)
+ acc = _mm256_fmadd_ps( d, q, acc );
+#else
+ acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
+#endif
+ }
+
+ *s = hsum_float_8(acc);
+#elif defined(__riscv_v_intrinsic)
+ float sumf = 0.0;
+ size_t vl = __riscv_vsetvl_e8m1(qk);
+
+ for (int i = 0; i < nb; i++) {
+ // load elements
+ vint8m1_t bx = __riscv_vle8_v_i8m1(x[i].qs, vl);
+ vint8m1_t by = __riscv_vle8_v_i8m1(y[i].qs, vl);
+
+ vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx, by, vl);
+
+ vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
+ vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);
+
+ int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);
+
+ sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
+ }
+
+ *s = sumf;
+#else
+ // scalar
+ float sumf = 0.0;
+
+ for (int i = 0; i < nb; i++) {
+ int sumi = 0;
+
+ for (int j = 0; j < qk; j++) {
+ sumi += x[i].qs[j]*y[i].qs[j];
+ }
+
+ sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
+ }
+
+ *s = sumf;
+#endif
+}
+
+#if QK_K == 256
+void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+
+ const block_q2_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
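+    // q2_K packs a 4-bit scale and a 4-bit min per 16 values; every variant
+    // below computes, per super-block, the scaled quant dot product
+    //   d * sum_j(sc_j * (q2.q8)_j)  minus the correction  dmin * sum_j(m_j * bsums_j),
+    // with the correction built from the precomputed q8 block sums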
+
+#ifdef __ARM_NEON
+
+ const uint8x16_t m3 = vdupq_n_u8(0x3);
+ const uint8x16_t m4 = vdupq_n_u8(0xF);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+
+ int8x16x2_t q2bytes;
+ uint8_t aux[16];
+
+ float sum = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+ const uint8_t * restrict sc = x[i].scales;
+
+ const uint8x16_t mins_and_scales = vld1q_u8(sc);
+ const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
+ vst1q_u8(aux, scales);
+
+ const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
+ const int16x8x2_t q8sums = vld1q_s16_x2(y[i].bsums);
+ const int16x8x2_t mins16 = {vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))};
+ const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
+ vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
+ const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
+ vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
+ sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
+
+ int isum = 0;
+ int is = 0;
+
+// We use this macro instead of a function call because for some reason
+// the code runs 2-3% slower, even if the function is declared inline
+#if defined(__ARM_FEATURE_DOTPROD)
+#define MULTIPLY_ACCUM_WITH_SCALE(index)\
+ isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
+ isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];
+#else
+#define MULTIPLY_ACCUM_WITH_SCALE(index)\
+ {\
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[0]), vget_low_s8 (q8bytes.val[0])),\
+ vmull_s8(vget_high_s8(q2bytes.val[0]), vget_high_s8(q8bytes.val[0])));\
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[1]), vget_low_s8 (q8bytes.val[1])),\
+ vmull_s8(vget_high_s8(q2bytes.val[1]), vget_high_s8(q8bytes.val[1])));\
+ isum += vaddvq_s16(p1) * aux[is+(index)] + vaddvq_s16(p2) * aux[is+1+(index)];\
+ }
+#endif
+
+#define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
+ q8bytes = vld1q_s8_x2(q8); q8 += 32;\
+ q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
+ q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
+ MULTIPLY_ACCUM_WITH_SCALE((index));
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const uint8x16x2_t q2bits = vld1q_u8_x2(q2); q2 += 32;
+
+ int8x16x2_t q8bytes = vld1q_s8_x2(q8); q8 += 32;
+ q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
+ q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
+ MULTIPLY_ACCUM_WITH_SCALE(0);
+
+ SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
+
+ SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
+
+ SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);
+
+ is += 8;
+ }
+ sum += d * isum;
+
+ }
+
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m3 = _mm256_set1_epi8(3);
+ const __m128i m4 = _mm_set1_epi8(0xF);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
+ const __m128i scales8 = _mm_and_si128(mins_and_scales, m4);
+ const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
+ const __m256i mins = _mm256_cvtepi8_epi16(mins8);
+ const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums));
+
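+        // dmin was negated above, so this fused multiply-add subtracts the
+        // min*bsum correction from the accumulator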
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc);
+
+ const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
+ const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
+ const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
+ const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
+
+ __m256i sumi = _mm256_setzero_si256();
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32;
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+
+ const __m256i q2_0 = _mm256_and_si256(q2bits, m3);
+ const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3);
+ const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3);
+ const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3);
+
+ __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
+ __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
+ __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2);
+ __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3);
+
+ p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0);
+ p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1);
+ p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2);
+ p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3);
+
+ p0 = _mm256_add_epi32(p0, p1);
+ p2 = _mm256_add_epi32(p2, p3);
+
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2));
+ }
+
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m3 = _mm_set1_epi8(0x3);
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i m2 = _mm_set1_epi8(0x2);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ // load mins and scales from block_q2_K.scales[QK_K/16]
+ const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
+ const __m128i scales16 = _mm_and_si128(mins_and_scales, m4);
+ const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
+ const __m128i mins_0 = _mm_cvtepi8_epi16(mins16);
+ const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16));
+
+ // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2
+ const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0]));
+ const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));
+
+ // sumf += -dmin * summs in 32bits*8
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);
+
+ const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
+ const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
+ const __m128i scales[2] = { scales_0, scales_1 };
+
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K]
+ const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+
+ // load 2bits*16*8 from block_q2_K.qs[QK_K/4]
+ __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
+ const __m128i q2_0 = _mm_and_si128(q2bits, m3);
+ const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
+ const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
+ const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
+ q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
+ const __m128i q2_1 = _mm_and_si128(q2bits, m3);
+ const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
+ const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
+ const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
+
+ // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8
+ __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0);
+ __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1);
+ __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2);
+ __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3);
+ __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4);
+ __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5);
+ __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6);
+ __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7);
+
+ // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8
+ __m128i shuffle = _mm_set1_epi16(0x0100);
+ p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7);
+
+ p0 = _mm_add_epi32(p0, p1);
+ p2 = _mm_add_epi32(p2, p3);
+ p4 = _mm_add_epi32(p4, p5);
+ p6 = _mm_add_epi32(p6, p7);
+
+ // isum in 32bits*4*2
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6));
+ }
+
+ // sumf += dall * isum - dmin * summs in 32bits
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ float sumf = 0;
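+ // gather index table for vrgather below: lanes 0..15 pick scale byte k, lanes 16..31 pick
+ // scale byte k+1, so each 16-quant sub-block is multiplied by its own 4-bit scale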
+ uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * q2 = x[i].qs;
+ const int8_t * q8 = y[i].qs;
+ const uint8_t * sc = x[i].scales;
+
+ const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ size_t vl = 16;
+
+ vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl);
+ vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl);
+
+ vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl);
+
+ vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl);
+ vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl);
+ vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl));
+ vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl);
+ vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
+
+ sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums);
+
+ vl = 32;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+ vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl);
+
+ uint8_t is=0;
+ int isum=0;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+ // load Q2
+ vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl);
+
+ vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl);
+ vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03 , vl);
+ vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03 , vl);
+ vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03 , vl);
+
+ // duplicate scale elements for product
+ vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0+is, vl), vl);
+ vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2+is, vl), vl);
+ vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4+is, vl), vl);
+ vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6+is, vl), vl);
+
+ vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl));
+ vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl));
+ vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl));
+ vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl));
+
+ // load Q8
+ vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
+ vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
+ vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8+64, vl);
+ vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8+96, vl);
+
+ vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl);
+ vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl);
+ vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl);
+ vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl);
+
+ vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl);
+ vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl);
+
+ isum += __riscv_vmv_x_s_i32m1_i32(isum1);
+
+ q2+=32; q8+=128; is=8;
+
+ }
+
+ sumf += dall * isum;
+
+ }
+
+ *s = sumf;
+
+#else
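+ // scalar reference: for each super-block,
+ //   sumf += dall * sum_j (sc[j] & 0xF) * dot(q2_j, q8_j) - dmin * sum_j (sc[j] >> 4) * bsums[j]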
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * q2 = x[i].qs;
+ const int8_t * q8 = y[i].qs;
+ const uint8_t * sc = x[i].scales;
+
+ int summs = 0;
+ for (int j = 0; j < 16; ++j) {
+ summs += y[i].bsums[j] * (sc[j] >> 4);
+ }
+
+ const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ int isum = 0;
+ int is = 0;
+ int d;
+ for (int k = 0; k < QK_K/128; ++k) {
+ int shift = 0;
+ for (int j = 0; j < 4; ++j) {
+ d = sc[is++] & 0xF;
+ int isuml = 0;
+ for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
+ isum += d * isuml;
+ d = sc[is++] & 0xF;
+ isuml = 0;
+ for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
+ isum += d * isuml;
+ shift += 2;
+ q8 += 32;
+ }
+ q2 += 32;
+ }
+ sumf += dall * isum - dmin * summs;
+ }
+ *s = sumf;
+#endif
+}
+
+#else
+
+void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+
+ const block_q2_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
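+ // QK_K == 64 variant: each block carries 64 2-bit quants in 16 bytes plus 4 scale bytes
+ // (low nibble = sub-block scale, high nibble = sub-block min)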
+
+#ifdef __ARM_NEON
+
+ const uint8x16_t m3 = vdupq_n_u8(0x3);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+
+ int8x16x4_t q2bytes;
+
+ uint32_t aux32[2];
+ const uint8_t * scales = (const uint8_t *)aux32;
+
+ float sum = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * (float)x[i].d;
+ const float dmin = -y[i].d * (float)x[i].dmin;
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+ const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
+
+ aux32[0] = sc[0] & 0x0f0f0f0f;
+ aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
+
+ sum += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
+
+ int isum1 = 0, isum2 = 0;
+
+ const uint8x16_t q2bits = vld1q_u8(q2);
+
+ const int8x16x4_t q8bytes = vld1q_s8_x4(q8);
+
+ q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits, m3));
+ q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 2), m3));
+ q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 4), m3));
+ q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 6), m3));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ isum1 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * scales[0];
+ isum2 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * scales[1];
+ isum1 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[2], q8bytes.val[2])) * scales[2];
+ isum2 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[3], q8bytes.val[3])) * scales[3];
+#else
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q2bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q2bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ isum1 += vaddvq_s16(p1) * scales[0];
+ isum2 += vaddvq_s16(p2) * scales[1];
+
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q2bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ const int16x8_t p4 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q2bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ isum1 += vaddvq_s16(p3) * scales[2];
+ isum2 += vaddvq_s16(p4) * scales[3];
+#endif
+ sum += d * (isum1 + isum2);
+
+ }
+
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m3 = _mm256_set1_epi8(3);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ uint32_t ud, um;
+ const uint8_t * restrict db = (const uint8_t *)&ud;
+ const uint8_t * restrict mb = (const uint8_t *)&um;
+
+ float summs = 0;
+
+ // TODO: optimize this
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
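+ // low nibbles of the 4 scale bytes are the sub-block scales (db), high nibbles the mins (mb)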
+ ud = (sc[0] >> 0) & 0x0f0f0f0f;
+ um = (sc[0] >> 4) & 0x0f0f0f0f;
+
+ int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
+ summs += dmin * smin;
+
+ const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
+ const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3);
+ const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
+ const __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
+
+ const __m256i p_0 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 0));
+ const __m256i p_1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 1));
+ const __m256i p_2 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 0));
+ const __m256i p_3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 1));
+
+ acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0), acc);
+ acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1), acc);
+ acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2), acc);
+ acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3), acc);
+ }
+
+ *s = hsum_float_8(acc) + summs;
+
+#elif defined __AVX__
+
+ const __m128i m3 = _mm_set1_epi8(3);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ uint32_t ud, um;
+ const uint8_t * restrict db = (const uint8_t *)&ud;
+ const uint8_t * restrict mb = (const uint8_t *)&um;
+
+ float summs = 0;
+
+ // TODO: optimize this
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
+ ud = (sc[0] >> 0) & 0x0f0f0f0f;
+ um = (sc[0] >> 4) & 0x0f0f0f0f;
+
+ int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
+ summs += dmin * smin;
+
+ const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
+ const __m128i q2_0 = _mm_and_si128(q2bits, m3);
+ const __m128i q2_1 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
+ const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
+ const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m128i p0 = _mm_maddubs_epi16(q2_0, _mm256_extractf128_si256(q8_0, 0));
+ const __m128i p1 = _mm_maddubs_epi16(q2_1, _mm256_extractf128_si256(q8_0, 1));
+ const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0));
+ const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1));
+
+ const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
+ const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
+ const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
+ const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));
+
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2)), acc);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3)), acc);
+ }
+
+ *s = hsum_float_8(acc) + summs;
+
+#elif defined __riscv_v_intrinsic
+
+ uint32_t aux32[2];
+ const uint8_t * scales = (const uint8_t *)aux32;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * (float)x[i].d;
+ const float dmin = -y[i].d * (float)x[i].dmin;
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+ const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
+
+ aux32[0] = sc[0] & 0x0f0f0f0f;
+ aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
+
+ sumf += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
+
+ int isum1 = 0;
+ int isum2 = 0;
+
+ size_t vl = 16;
+
+ vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
+
+ // load Q2
+ vuint8mf2_t q2_x = __riscv_vle8_v_u8mf2(q2, vl);
+
+ vint8mf2_t q2_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q2_x, 0x03, vl));
+ vint8mf2_t q2_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x2, vl), 0x03 , vl));
+ vint8mf2_t q2_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x4, vl), 0x03 , vl));
+ vint8mf2_t q2_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x6, vl), 0x03 , vl));
+
+ // load Q8, and take product with Q2
+ vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q2_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
+ vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q2_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
+ vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q2_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
+ vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q2_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
+
+ vint16m1_t vs_0 = __riscv_vredsum_vs_i16m1_i16m1(p0, vzero, vl);
+ vint16m1_t vs_1 = __riscv_vredsum_vs_i16m1_i16m1(p1, vzero, vl);
+ vint16m1_t vs_2 = __riscv_vredsum_vs_i16m1_i16m1(p2, vzero, vl);
+ vint16m1_t vs_3 = __riscv_vredsum_vs_i16m1_i16m1(p3, vzero, vl);
+
+ isum1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[0];
+ isum2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[1];
+ isum1 += __riscv_vmv_x_s_i16m1_i16(vs_2) * scales[2];
+ isum2 += __riscv_vmv_x_s_i16m1_i16(vs_3) * scales[3];
+
+ sumf += d * (isum1 + isum2);
+
+ }
+
+ *s = sumf;
+
+#else
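+ // scalar reference for the QK_K == 64 case: 4 sub-blocks of 16 quants, same dall/dmin decomposition as above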
+
+ float sumf = 0;
+
+ int isum[4];
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * q2 = x[i].qs;
+ const int8_t * q8 = y[i].qs;
+ const uint8_t * sc = x[i].scales;
+
+ int summs = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ summs += y[i].bsums[j] * (sc[j] >> 4);
+ }
+
+ const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ isum[0] = isum[1] = isum[2] = isum[3] = 0;
+ for (int l = 0; l < 16; ++l) {
+ isum[0] += q8[l+ 0] * ((q2[l] >> 0) & 3);
+ isum[1] += q8[l+16] * ((q2[l] >> 2) & 3);
+ isum[2] += q8[l+32] * ((q2[l] >> 4) & 3);
+ isum[3] += q8[l+48] * ((q2[l] >> 6) & 3);
+ }
+ for (int l = 0; l < 4; ++l) {
+ isum[l] *= (sc[l] & 0xF);
+ }
+ sumf += dall * (isum[0] + isum[1] + isum[2] + isum[3]) - dmin * summs;
+ }
+ *s = sumf;
+#endif
+}
+#endif
+
+#if QK_K == 256
+void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const uint32_t kmask1 = 0x03030303;
+ const uint32_t kmask2 = 0x0f0f0f0f;
+
+ const block_q3_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_NEON
+
+ uint32_t aux[3];
+ uint32_t utmp[4];
+
+ const uint8x16_t m3b = vdupq_n_u8(0x3);
+#ifdef __ARM_FEATURE_DOTPROD
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+
+ const uint8x16_t m0 = vdupq_n_u8(1);
+ const uint8x16_t m1 = vshlq_n_u8(m0, 1);
+ const uint8x16_t m2 = vshlq_n_u8(m0, 2);
+ const uint8x16_t m3 = vshlq_n_u8(m0, 3);
+ const int8_t m32 = 32;
+
+ int8x16x4_t q3bytes;
+
+ float sum = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const uint8_t * restrict qh = x[i].hmask;
+ const int8_t * restrict q8 = y[i].qs;
+
+ uint8x16x2_t qhbits = vld1q_u8_x2(qh);
+
+ uint8x16x4_t q3h;
+
+ int32_t isum = 0;
+
+ // Set up scales
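+ // each of the 16 six-bit scales is split across the 12 scale bytes: the low 4 bits sit in the
+ // nibbles of bytes 0..7, the high 2 bits are packed four to a byte in bytes 8..11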
+ memcpy(aux, x[i].scales, 12);
+ utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
+ utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
+ utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
+ utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
+
+ int8_t * scale = (int8_t *)utmp;
+ for (int j = 0; j < 16; ++j) scale[j] -= m32;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const uint8x16x2_t q3bits = vld1q_u8_x2(q3); q3 += 32;
+ const int8x16x4_t q8bytes_1 = vld1q_s8_x4(q8); q8 += 64;
+ const int8x16x4_t q8bytes_2 = vld1q_s8_x4(q8); q8 += 64;
+
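+ // q3h is 4 wherever the corresponding hmask bit is clear; subtracting it maps the quants into [-4, 3]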
+ q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
+ q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
+ q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
+ q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);
+
+ q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
+ q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
+ q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
+ q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];
+#else
+ int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes_1.val[0])),
+ vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes_1.val[0])));
+ int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes_1.val[1])),
+ vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes_1.val[1])));
+ int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes_1.val[2])),
+ vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes_1.val[2])));
+ int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes_1.val[3])),
+ vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes_1.val[3])));
+ isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1] + vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3];
+#endif
+ scale += 4;
+
+ q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
+ q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
+ q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
+ q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);
+
+ q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
+ q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
+ q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
+ q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];
+#else
+ p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes_2.val[0])),
+ vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes_2.val[0])));
+ p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes_2.val[1])),
+ vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes_2.val[1])));
+ p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes_2.val[2])),
+ vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes_2.val[2])));
+ p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes_2.val[3])),
+ vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes_2.val[3])));
+ isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1] + vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3];
+#endif
+ scale += 4;
+
+ if (j == 0) {
+ qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
+ qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
+ }
+
+ }
+ sum += d * isum;
+
+ }
+
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m3 = _mm256_set1_epi8(3);
+ const __m256i mone = _mm256_set1_epi8(1);
+ const __m128i m32 = _mm_set1_epi8(32);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ uint32_t aux[3];
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ // Set up scales
+ memcpy(aux, x[i].scales, 12);
+ __m128i scales128 = _mm_set_epi32(
+ ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
+ ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
+ (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
+ (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
+ scales128 = _mm_sub_epi8(scales128, m32);
+ const __m256i all_scales = _mm256_cvtepi8_epi16(scales128);
+ const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
+ const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
+ const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
+
+ // high bit
+ const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask);
+
+ // integer accumulator
+ __m256i sumi = _mm256_setzero_si256();
+
+ int bit = 0;
+ int is = 0;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+ // load low 2 bits
+ const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32;
+
+ // prepare low and high bits
+ const __m256i q3l_0 = _mm256_and_si256(q3bits, m3);
+ const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
+ ++bit;
+
+ const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3);
+ const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
+ ++bit;
+
+ const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3);
+ const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
+ ++bit;
+
+ const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3);
+ const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
+ ++bit;
+
+ // load Q8 quants
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+
+ // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16,
+ // and then subtract. The high bit part carries the implicit -4 offset: it is 4 if the high bit was not set
+ // (so 4 gets subtracted) and 0 if it was set, leaving the quants in [-4, 3]
+ __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
+ __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
+ __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2);
+ __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3);
+
+ __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
+ __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
+ __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2);
+ __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3);
+
+ p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
+
+ // multiply with scales
+ p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0);
+ p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1);
+ p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2);
+ p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3);
+
+ // accumulate
+ p16_0 = _mm256_add_epi32(p16_0, p16_1);
+ p16_2 = _mm256_add_epi32(p16_2, p16_3);
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2));
+
+ }
+
+ // multiply with block scale and accumulate
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m3 = _mm_set1_epi8(3);
+ const __m128i mone = _mm_set1_epi8(1);
+ const __m128i m32 = _mm_set1_epi8(32);
+ const __m128i m2 = _mm_set1_epi8(2);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ const uint32_t * aux;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ // Set up scales
+ aux = (const uint32_t *)x[i].scales;
+ __m128i scales128 = _mm_set_epi32(
+ ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
+ ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
+ (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
+ (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
+ scales128 = _mm_sub_epi8(scales128, m32);
+ const __m128i scales_0 = _mm_cvtepi8_epi16(scales128);
+ const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128));
+ const __m128i scales[2] = { scales_0, scales_1 };
+
+ // high bit *128*2 from block_q3_K.hmask[QK_K/8]
+ const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]);
+ const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]);
+
+ // integer accumulator
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
+ for (int j = 0; j < QK_K/128; ++j) {
+ // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4]
+ const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
+ const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
+
+ // prepare low and high bits
+ const int bit = j << 2;
+
+ const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3);
+ const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3);
+ const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2);
+ const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2);
+
+ const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3);
+ const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3);
+ const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
+ const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
+
+ const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3);
+ const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3);
+ const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
+ const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
+
+ const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3);
+ const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3);
+ const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
+ const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
+
+ // load Q8 quants from block_q8_K.qs[QK_K]
+ const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+
+ // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm_maddubs_epi16,
+ // and then subtract. The high bit part carries the implicit -4 offset: it is 4 if the high bit was not set
+ // (so 4 gets subtracted) and 0 if it was set, leaving the quants in [-4, 3]
+ __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0);
+ __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1);
+ __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2);
+ __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3);
+ __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4);
+ __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5);
+ __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6);
+ __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7);
+
+ __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0);
+ __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1);
+ __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2);
+ __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3);
+ __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4);
+ __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5);
+ __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6);
+ __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7);
+
+ p16_0 = _mm_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm_sub_epi16(p16_3, q8s_3);
+ p16_4 = _mm_sub_epi16(p16_4, q8s_4);
+ p16_5 = _mm_sub_epi16(p16_5, q8s_5);
+ p16_6 = _mm_sub_epi16(p16_6, q8s_6);
+ p16_7 = _mm_sub_epi16(p16_7, q8s_7);
+
+ // multiply with scales
+ __m128i shuffle = _mm_set1_epi16(0x0100);
+ p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7);
+
+ // accumulate
+ p16_0 = _mm_add_epi32(p16_0, p16_1);
+ p16_2 = _mm_add_epi32(p16_2, p16_3);
+ p16_4 = _mm_add_epi32(p16_4, p16_5);
+ p16_6 = _mm_add_epi32(p16_6, p16_7);
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6));
+
+ }
+
+ // multiply with block scale and accumulate
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ uint32_t aux[3];
+ uint32_t utmp[4];
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const uint8_t * restrict qh = x[i].hmask;
+ const int8_t * restrict q8 = y[i].qs;
+
+ memcpy(aux, x[i].scales, 12);
+ utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
+ utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
+ utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
+ utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
+
+ int8_t * scale = (int8_t *)utmp;
+ for (int j = 0; j < 16; ++j) scale[j] -= 32;
+
+ size_t vl = 32;
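+ // m selects successive hmask bits; where a bit is clear, 4 is subtracted from the corresponding quants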
+ uint8_t m = 1;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+ vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl);
+
+ int sum_t = 0;
+
+ for (int j = 0; j < QK_K; j += 128) {
+
+ vl = 32;
+
+ // load Q3
+ vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl);
+
+ vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl));
+ vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl));
+ vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl));
+ vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl));
+
+ // compute mask for subtraction
+ vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl);
+ vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_m(vmask_0, q3_0, 0x4, vl);
+ m <<= 1;
+
+ vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl);
+ vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_m(vmask_1, q3_1, 0x4, vl);
+ m <<= 1;
+
+ vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl);
+ vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_m(vmask_2, q3_2, 0x4, vl);
+ m <<= 1;
+
+ vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl);
+ vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_m(vmask_3, q3_3, 0x4, vl);
+ m <<= 1;
+
+ // load Q8 and take product with Q3
+ vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl);
+ vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
+ vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
+ vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
+
+ vl = 16;
+
+ // retrieve lane to multiply with scale
+ vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
+ vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
+ vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);
+ vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl);
+ vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl);
+ vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl);
+ vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl);
+ vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl);
+
+ vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl);
+ vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl);
+ vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl);
+ vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl);
+
+ sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
+
+ q3 += 32; q8 += 128; scale += 8;
+
+ }
+
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+
+ sumf += d*sum_t;
+
+ }
+
+ *s = sumf;
+
+#else
+ // scalar version
+ // This function is written this way so that the compiler can vectorize most of it.
+ // With -Ofast, GCC and clang manage to produce code that is within a factor of 2 or so of the
+ // manually vectorized version above. Every other version I tried would run at least 4 times slower.
+ // The ideal situation would be if we could just write the code once, and the compiler would
+ // automatically produce the best possible set of machine instructions, instead of us having to manually
+ // write vectorized versions for AVX, ARM_NEON, etc.
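+ //
+ // per block: sumf += d * sum_j (scales[j] - 32) * dot(a_j, q8_j), where a[] holds the 2-bit
+ // quants with 4 subtracted wherever the corresponding hmask bit is clear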
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ memset(sums, 0, 8*sizeof(float));
+
+ uint32_t auxs[4];
+ const int8_t * scales = (const int8_t*)auxs;
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q3 = x[i].qs;
+ const uint8_t * restrict hm = x[i].hmask;
+ const int8_t * restrict q8 = y[i].qs;
+ memset(aux32, 0, 8*sizeof(int32_t));
+ int8_t * restrict a = aux8;
+ uint8_t m = 1;
+ for (int j = 0; j < QK_K; j += 128) {
+ for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
+ for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+ a += 32; m <<= 1;
+ for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
+ for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+ a += 32; m <<= 1;
+ for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
+ for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+ a += 32; m <<= 1;
+ for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
+ for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+ a += 32; m <<= 1;
+ q3 += 32;
+ }
+ a = aux8;
+
+ memcpy(auxs, x[i].scales, 12);
+ uint32_t tmp = auxs[2];
+ auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
+ auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
+ auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
+ auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
+ for (int j = 0; j < QK_K/16; ++j) {
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
+ q8 += 8; a += 8;
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+
+#endif
+
+}
+
+#else
+
+void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q3_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_NEON
+
+#ifdef __ARM_FEATURE_DOTPROD
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+
+ const uint8x16_t m3b = vdupq_n_u8(0x3);
+ const uint8x16_t mh = vdupq_n_u8(4);
+
+ int8x16x4_t q3bytes;
+
+ uint16_t aux16[2];
+ int8_t * scales = (int8_t *)aux16;
+
+ float sum = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ uint8x16x4_t q3h;
+
+ const uint8x8_t hbits = vld1_u8(x[i].hmask);
+ const uint8x16_t q3bits = vld1q_u8(x[i].qs);
+ const int8x16x4_t q8bytes = vld1q_s8_x4(y[i].qs);
+
+ const uint16_t a = *(const uint16_t *)x[i].scales;
+ aux16[0] = a & 0x0f0f;
+ aux16[1] = (a >> 4) & 0x0f0f;
+
+ for (int j = 0; j < 4; ++j) scales[j] -= 8;
+
+ int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
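+ // the -4 offset is folded into the bsums term above; the quants below are assembled as full 3-bit values in 0..7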
+
+ const float d = y[i].d * (float)x[i].d;
+
+ const uint8x16_t htmp = vcombine_u8(hbits, vshr_n_u8(hbits, 1));
+ q3h.val[0] = vandq_u8(mh, vshlq_n_u8(htmp, 2));
+ q3h.val[1] = vandq_u8(mh, htmp);
+ q3h.val[2] = vandq_u8(mh, vshrq_n_u8(htmp, 2));
+ q3h.val[3] = vandq_u8(mh, vshrq_n_u8(htmp, 4));
+
+ q3bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q3bits, m3b), q3h.val[0]));
+ q3bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 2), m3b), q3h.val[1]));
+ q3bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 4), m3b), q3h.val[2]));
+ q3bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q3bits, 6), q3h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes.val[0])) * scales[0];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes.val[1])) * scales[2];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes.val[2])) * scales[1];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes.val[3])) * scales[3];
+#else
+ const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ isum += vaddvq_s16(p0) * scales[0] + vaddvq_s16(p1) * scales[2] + vaddvq_s16(p2) * scales[1] + vaddvq_s16(p3) * scales[3];
+#endif
+
+ sum += d * isum;
+
+ }
+
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m3 = _mm256_set1_epi8(3);
+ const __m256i m1 = _mm256_set1_epi8(1);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ uint64_t aux64;
+
+ uint16_t aux16[2];
+ const int8_t * aux8 = (const int8_t *)aux16;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint16_t a = *(const uint16_t *)x[i].scales;
+ aux16[0] = a & 0x0f0f;
+ aux16[1] = (a >> 4) & 0x0f0f;
+
+ const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
+ const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
+
+ memcpy(&aux64, x[i].hmask, 8);
+
+ const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
+ __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux);
+ __m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4);
+ q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2);
+ q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2);
+
+ // load low 2 bits
+ const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
+
+ // prepare low and high bits
+ const __m256i q3aux = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits);
+ const __m256i q3l_0 = _mm256_and_si256(q3aux, m3);
+ const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3);
+
+ // load Q8 quants
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16,
+ // and then subtract. The high bit part carries the implicit -4 offset: it is 4 if the high bit was not set
+ // (so 4 gets subtracted) and 0 if it was set, leaving the quants in [-4, 3]
+ const __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
+ const __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
+
+ __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
+ __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
+
+ p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
+
+ // multiply with scales
+ p16_0 = _mm256_madd_epi16(scale_0, p16_0);
+ p16_1 = _mm256_madd_epi16(scale_1, p16_1);
+
+ p16_0 = _mm256_add_epi32(p16_0, p16_1);
+
+ // multiply with block scale and accumulate
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16_0), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m3 = _mm_set1_epi8(3);
+ const __m128i m1 = _mm_set1_epi8(1);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ uint64_t aux64;
+
+ uint16_t aux16[2];
+ const int8_t * aux8 = (const int8_t *)aux16;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint16_t a = *(const uint16_t *)x[i].scales;
+ aux16[0] = a & 0x0f0f;
+ aux16[1] = (a >> 4) & 0x0f0f;
+
+ const __m128i scale_0 = _mm_set1_epi16(aux8[0] - 8);
+ const __m128i scale_1 = _mm_set1_epi16(aux8[2] - 8);
+ const __m128i scale_2 = _mm_set1_epi16(aux8[1] - 8);
+ const __m128i scale_3 = _mm_set1_epi16(aux8[3] - 8);
+
+ memcpy(&aux64, x[i].hmask, 8);
+
+ __m128i q3h_0 = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
+ __m128i q3h_1 = _mm_srli_epi16(q3h_0, 2);
+ __m128i q3h_2 = _mm_srli_epi16(q3h_0, 4);
+ __m128i q3h_3 = _mm_srli_epi16(q3h_0, 6);
+ q3h_0 = _mm_slli_epi16(_mm_andnot_si128(q3h_0, m1), 2);
+ q3h_1 = _mm_slli_epi16(_mm_andnot_si128(q3h_1, m1), 2);
+ q3h_2 = _mm_slli_epi16(_mm_andnot_si128(q3h_2, m1), 2);
+ q3h_3 = _mm_slli_epi16(_mm_andnot_si128(q3h_3, m1), 2);
+
+ // load low 2 bits
+ const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
+
+ // prepare low and high bits
+ const __m128i q3l_0 = _mm_and_si128(q3bits, m3);
+ const __m128i q3l_1 = _mm_and_si128(_mm_srli_epi16(q3bits, 2), m3);
+ const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits, 4), m3);
+ const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits, 6), m3);
+
+ // load Q8 quants
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm_maddubs_epi16,
+ // and then subtract. The high bit part carries the implicit -4 offset: it is 4 if the high bit was not set
+ // (so 4 gets subtracted) and 0 if it was set, leaving the quants in [-4, 3]
+ const __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, _mm256_extractf128_si256(q8_0, 0));
+ const __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, _mm256_extractf128_si256(q8_0, 1));
+ const __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, _mm256_extractf128_si256(q8_1, 0));
+ const __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, _mm256_extractf128_si256(q8_1, 1));
+
+ __m128i p16_0 = _mm_maddubs_epi16(q3l_0, _mm256_extractf128_si256(q8_0, 0));
+ __m128i p16_1 = _mm_maddubs_epi16(q3l_1, _mm256_extractf128_si256(q8_0, 1));
+ __m128i p16_2 = _mm_maddubs_epi16(q3l_2, _mm256_extractf128_si256(q8_1, 0));
+ __m128i p16_3 = _mm_maddubs_epi16(q3l_3, _mm256_extractf128_si256(q8_1, 1));
+
+ p16_0 = _mm_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm_sub_epi16(p16_3, q8s_3);
+
+ // multiply with scales
+ p16_0 = _mm_madd_epi16(scale_0, p16_0);
+ p16_1 = _mm_madd_epi16(scale_1, p16_1);
+ p16_2 = _mm_madd_epi16(scale_2, p16_2);
+ p16_3 = _mm_madd_epi16(scale_3, p16_3);
+
+ p16_0 = _mm_add_epi32(p16_0, p16_2);
+ p16_1 = _mm_add_epi32(p16_1, p16_3);
+ __m256i p16 = MM256_SET_M128I(p16_1, p16_0);
+
+ // multiply with block scale and accumulate
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ uint16_t aux16[2];
+ int8_t * scales = (int8_t *)aux16;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint16_t a = *(const uint16_t *)x[i].scales;
+ aux16[0] = a & 0x0f0f;
+ aux16[1] = (a >> 4) & 0x0f0f;
+
+ for (int j = 0; j < 4; ++j) scales[j] -= 8;
+
+ int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
+
+ const float d = y[i].d * (float)x[i].d;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+
+ // load qh
+ vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(x[i].hmask, 8);
+ vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
+
+ size_t vl = 16;
+
+ // extend and combine both qh_x1 and qh_x2
+ vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
+
+ vuint8mf2_t qh_0 = __riscv_vand_vx_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
+ vuint8mf2_t qh_1 = __riscv_vand_vx_u8mf2(qh_x, 0x4, vl);
+ vuint8mf2_t qh_2 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
+ vuint8mf2_t qh_3 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), 0x4, vl);
+
+ // load Q3
+ vuint8mf2_t q3_x = __riscv_vle8_v_u8mf2(q3, vl);
+
+ vuint8mf2_t q3h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q3_x, 0x3, vl), qh_0, vl);
+ vuint8mf2_t q3h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 2, vl), 0x3, vl), qh_1, vl);
+ vuint8mf2_t q3h_2 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 4, vl), 0x3, vl), qh_2, vl);
+ vuint8mf2_t q3h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 0x6, vl), qh_3, vl);
+
+ vint8mf2_t q3_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_0);
+ vint8mf2_t q3_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_1);
+ vint8mf2_t q3_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_2);
+ vint8mf2_t q3_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_3);
+
+ // load Q8 and take product with Q3
+ vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q3_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
+ vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q3_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
+ vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q3_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
+ vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q3_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
+
+ vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
+ vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
+ vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
+ vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
+
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scales[0];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scales[2];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scales[1];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scales[3];
+
+ sumf += d * isum;
+
+ }
+
+ *s = sumf;
+
+#else
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ int32_t scales[4];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q3 = x[i].qs;
+ const uint8_t * restrict hm = x[i].hmask;
+ const int8_t * restrict q8 = y[i].qs;
+ int8_t * restrict a = aux8;
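+ // unpack the 64 quants: bit 2k of hmask byte l covers q3[l] at shift 2k, bit 2k+1 covers q3[l+8]
+ // at the same shift; a clear bit subtracts 4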
+ for (int l = 0; l < 8; ++l) {
+ a[l+ 0] = (int8_t)((q3[l+0] >> 0) & 3) - (hm[l] & 0x01 ? 0 : 4);
+ a[l+ 8] = (int8_t)((q3[l+8] >> 0) & 3) - (hm[l] & 0x02 ? 0 : 4);
+ a[l+16] = (int8_t)((q3[l+0] >> 2) & 3) - (hm[l] & 0x04 ? 0 : 4);
+ a[l+24] = (int8_t)((q3[l+8] >> 2) & 3) - (hm[l] & 0x08 ? 0 : 4);
+ a[l+32] = (int8_t)((q3[l+0] >> 4) & 3) - (hm[l] & 0x10 ? 0 : 4);
+ a[l+40] = (int8_t)((q3[l+8] >> 4) & 3) - (hm[l] & 0x20 ? 0 : 4);
+ a[l+48] = (int8_t)((q3[l+0] >> 6) & 3) - (hm[l] & 0x40 ? 0 : 4);
+ a[l+56] = (int8_t)((q3[l+8] >> 6) & 3) - (hm[l] & 0x80 ? 0 : 4);
+ }
+
+ scales[0] = (x[i].scales[0] & 0xF) - 8;
+ scales[1] = (x[i].scales[0] >> 4) - 8;
+ scales[2] = (x[i].scales[1] & 0xF) - 8;
+ scales[3] = (x[i].scales[1] >> 4) - 8;
+
+ memset(aux32, 0, 8*sizeof(int32_t));
+ for (int j = 0; j < QK_K/16; ++j) {
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] += q8[l] * a[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux32[l] += scales[j] * aux16[l];
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+
+#endif
+
+}
+#endif
+
+#if QK_K == 256
+void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q4_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+ static const uint32_t kmask1 = 0x3f3f3f3f;
+ static const uint32_t kmask2 = 0x0f0f0f0f;
+ static const uint32_t kmask3 = 0x03030303;
+
+ uint32_t utmp[4];
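+ // the 12 scale bytes pack 8 six-bit scales and 8 six-bit mins; kmask1/2/3 are used below to unpack them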
+
+#ifdef __ARM_NEON
+
+ const uint8x16_t m4b = vdupq_n_u8(0xf);
+#ifdef __ARM_FEATURE_DOTPROD
+ const int32x4_t mzero = vdupq_n_s32(0);
+#endif
+
+ int8x16x2_t q4bytes;
+ int8x16x2_t q8bytes;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
+
+ memcpy(utmp, x[i].scales, 12);
+
+ uint32x2_t mins8 = { 0 };
+ mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0);
+ mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1);
+
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[0] &= kmask1;
+
+ const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
+ const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
+ vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
+ sumf -= dmin * vaddvq_s32(prod);
+
+ const uint8_t * scales = (const uint8_t *)utmp;
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ int32_t sumi1 = 0;
+ int32_t sumi2 = 0;
+
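+ // 64 quants per iteration: low nibbles are scaled by scales[2*j+0], high nibbles by scales[2*j+1]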
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const uint8x16x2_t q4bits = vld1q_u8_x2(q4); q4 += 32;
+
+#ifdef __ARM_FEATURE_DOTPROD
+ q8bytes = vld1q_s8_x2(q8); q8 += 32;
+ q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
+
+ const int32x4_t p1 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
+ sumi1 += vaddvq_s32(p1) * scales[2*j+0];
+
+ q8bytes = vld1q_s8_x2(q8); q8 += 32;
+ q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
+
+ const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
+
+ sumi2 += vaddvq_s32(p2) * scales[2*j+1];
+#else
+ q8bytes = vld1q_s8_x2(q8); q8 += 32;
+ q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
+ const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ sumi1 += vaddvq_s16(vaddq_s16(p0, p1)) * scales[2*j+0];
+
+ q8bytes = vld1q_s8_x2(q8); q8 += 32;
+ q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ sumi2 += vaddvq_s16(vaddq_s16(p2, p3)) * scales[2*j+1];
+
+#endif
+ }
+
+ sumf += d * (sumi1 + sumi2);
+
+ }
+
+ *s = sumf;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+
+ __m256 acc = _mm256_setzero_ps();
+ __m128 acc_m = _mm_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
+
+ const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
+ const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
+ const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
+ acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m);
+
+ const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
+ const __m256i scales = MM256_SET_M128I(sc128, sc128);
+
+ __m256i sumi = _mm256_setzero_si256();
+
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
+ const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
+
+ const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
+ const __m256i q4l = _mm256_and_si256(q4bits, m4);
+ const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
+
+ const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
+ p16l = _mm256_madd_epi16(scale_l, p16l);
+
+ const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
+ p16h = _mm256_madd_epi16(scale_h, p16h);
+ const __m256i sumj = _mm256_add_epi32(p16l, p16h);
+
+ sumi = _mm256_add_epi32(sumi, sumj);
+ }
+
+ __m256 vd = _mm256_set1_ps(d);
+ acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
+
+ }
+
+ acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
+ acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
+
+ *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i m2 = _mm_set1_epi8(0x2);
+
+ __m256 acc = _mm256_setzero_ps();
+ __m128 acc_m = _mm_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
+ const __m128i scales = _mm_cvtepu8_epi16(utmps);
+ const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
+
+ const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
+ const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
+ const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
+ const __m128i prod = _mm_madd_epi16(mins, q8s);
+ acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m);
+
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
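+        // the shuffle broadcasts 16-bit scale 2*j (then 2*j+1) into every lane; adding m2 advances it to the next scale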
+ __m128i shuffle = _mm_set1_epi16(0x0100);
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi16(shuffle, m2);
+
+ __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+ const __m128i q4l_0 = _mm_and_si128(q4bits, m4);
+ const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
+ q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+ const __m128i q4l_1 = _mm_and_si128(q4bits, m4);
+ const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
+
+ const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0);
+ p16l = _mm_madd_epi16(scale_l, p16l);
+ sumi_0 = _mm_add_epi32(sumi_0, p16l);
+ const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ p16l = _mm_maddubs_epi16(q4l_1, q8l_1);
+ p16l = _mm_madd_epi16(scale_l, p16l);
+ sumi_1 = _mm_add_epi32(sumi_1, p16l);
+
+ const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0);
+ p16h = _mm_madd_epi16(scale_h, p16h);
+ sumi_0 = _mm_add_epi32(sumi_0, p16h);
+ const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ p16h = _mm_maddubs_epi16(q4h_1, q8h_1);
+ p16h = _mm_madd_epi16(scale_h, p16h);
+ sumi_1 = _mm_add_epi32(sumi_1, p16h);
+
+ }
+
+ __m256 vd = _mm256_set1_ps(d);
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+ acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
+
+ }
+
+ acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
+ acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
+
+ *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
+
+#elif defined __riscv_v_intrinsic
+
+ const uint8_t * scales = (const uint8_t*)&utmp[0];
+ const uint8_t * mins = (const uint8_t*)&utmp[2];
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ size_t vl = 8;
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
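+        // pair-sum the 16 q8 block sums with two strided loads (even and odd elements)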
+ vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
+ vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
+ vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
+ vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
+ vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
+
+ vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
+ sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ vl = 32;
+
+ int32_t sum_1 = 0;
+ int32_t sum_2 = 0;
+
+ vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
+
+ for (int j = 0; j < QK_K/64; ++j) {
+ // load Q4
+ vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
+
+            // load Q8 and multiply it by the lower Q4 nibbles
+ vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
+ vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
+ vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl);
+ vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl);
+
+ sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0];
+
+            // load Q8 and multiply it by the upper Q4 nibbles
+ vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
+ vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
+ vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl);
+ vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl);
+
+ sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1];
+
+ q4 += 32; q8 += 64;
+
+ }
+
+ sumf += d*(sum_1 + sum_2);
+
+ }
+
+ *s = sumf;
+
+#else
+
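+    // scalar reference path: unpack the nibbles into aux8, then accumulate 32-quant sub-blocks with their scales and subtract the dmin*bsums correction per block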
+ const uint8_t * scales = (const uint8_t*)&utmp[0];
+ const uint8_t * mins = (const uint8_t*)&utmp[2];
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+ memset(aux32, 0, 8*sizeof(int32_t));
+ int8_t * restrict a = aux8;
+ for (int j = 0; j < QK_K/64; ++j) {
+ for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
+ a += 32;
+ for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
+ a += 32; q4 += 32;
+ }
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ int sumi = 0;
+ for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
+ a = aux8;
+ int is = 0;
+ for (int j = 0; j < QK_K/32; ++j) {
+ int32_t scale = scales[is++];
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
+ sumf -= dmin * sumi;
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+#else
+void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q4_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
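+    // QK_K == 64 variant: x[i].d holds the super-block scale and min as fp16, and the four 4-bit sub-block scales are packed into one 16-bit word of x[i].scales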
+#ifdef __ARM_NEON
+
+ const uint8x16_t m4b = vdupq_n_u8(0xf);
+
+#ifdef __ARM_FEATURE_DOTPROD
+ const int32x4_t mzero = vdupq_n_s32(0);
+#endif
+
+ float sumf = 0;
+
+ int8x16x2_t q4bytes;
+ int8x16x4_t q8bytes;
+
+ float sum_mins = 0.f;
+
+ uint16_t aux16[2];
+ const uint8_t * restrict scales = (const uint8_t *)aux16;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint16_t * restrict a = (const uint16_t *)x[i].scales;
+ aux16[0] = a[0] & 0x0f0f;
+ aux16[1] = (a[0] >> 4) & 0x0f0f;
+
+ const int32_t summi = scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]);
+ sum_mins += y[i].d * (float)x[i].d[1] * summi;
+
+ const float d = y[i].d * (float)x[i].d[0];
+
+ const uint8x16x2_t q4bits = vld1q_u8_x2(q4);
+
+#ifdef __ARM_FEATURE_DOTPROD
+ q8bytes = vld1q_s8_x4(q8);
+ q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
+
+ const int32x4_t p1 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
+ const int32_t sumi1 = vaddvq_s32(p1) * scales[0];
+
+ q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
+
+ const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]);
+ const int32_t sumi2 = vaddvq_s32(p2) * scales[1];
+
+#else
+ q8bytes = vld1q_s8_x4(q8);
+ q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
+ const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ int32_t sumi1 = vaddvq_s16(vaddq_s16(p0, p1)) * scales[0];
+
+ q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[2])));
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[3])));
+ int32_t sumi2 = vaddvq_s16(vaddq_s16(p2, p3)) * scales[1];
+
+#endif
+ sumf += d * (sumi1 + sumi2);
+
+ }
+
+ *s = sumf - sum_mins;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0;
+
+ uint16_t aux16[2];
+ const uint8_t * scales = (const uint8_t *)aux16;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
+ const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
+ const __m256 vd = _mm256_set1_ps(d);
+
+ const uint16_t * a = (const uint16_t *)x[i].scales;
+ aux16[0] = a[0] & 0x0f0f;
+ aux16[1] = (a[0] >> 4) & 0x0f0f;
+
+ summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
+ const __m256i q4l = _mm256_and_si256(q4bits, m4);
+ const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
+
+ const __m256i q8l = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8h = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
+ const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
+
+ const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l);
+ acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc);
+
+ const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h);
+ acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc);
+
+ }
+
+ *s = hsum_float_8(acc) - summs;
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0;
+
+ uint16_t aux16[2];
+ const uint8_t * scales = (const uint8_t *)aux16;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
+ const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
+ const __m256 vd = _mm256_set1_ps(d);
+
+ const uint16_t * a = (const uint16_t *)x[i].scales;
+ aux16[0] = a[0] & 0x0f0f;
+ aux16[1] = (a[0] >> 4) & 0x0f0f;
+
+ summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
+ const __m128i q4bits_0 = _mm256_extractf128_si256(q4bits, 0);
+ const __m128i q4bits_1 = _mm256_extractf128_si256(q4bits, 1);
+ const __m128i q4_0 = _mm_and_si128(q4bits_0, m4);
+ const __m128i q4_1 = _mm_and_si128(q4bits_1, m4);
+ const __m128i q4_2 = _mm_and_si128(_mm_srli_epi16(q4bits_0, 4), m4);
+ const __m128i q4_3 = _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
+ const __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
+ const __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
+ const __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
+
+ const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0);
+ const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1);
+ acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc);
+
+ const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2);
+ const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3);
+ acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc);
+
+ }
+
+ *s = hsum_float_8(acc) - summs;
+
+#elif defined __riscv_v_intrinsic
+
+ uint16_t s16[2];
+ const uint8_t * restrict scales = (const uint8_t *)s16;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint16_t * restrict b = (const uint16_t *)x[i].scales;
+ s16[0] = b[0] & 0x0f0f;
+ s16[1] = (b[0] >> 4) & 0x0f0f;
+
+ sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
+
+ size_t vl = 32;
+
+ vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
+
+ // load Q4
+ vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
+
+        // load Q8 and multiply it by the lower Q4 nibbles
+ vint8m1_t q4_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
+ vint16m2_t va_0 = __riscv_vwmul_vv_i16m2(q4_a, __riscv_vle8_v_i8m1(q8, vl), vl);
+ vint16m1_t aux1 = __riscv_vredsum_vs_i16m2_i16m1(va_0, vzero, vl);
+
+ sumf += d*scales[0]*__riscv_vmv_x_s_i16m1_i16(aux1);
+
+        // load Q8 and multiply it by the upper Q4 nibbles
+ vint8m1_t q4_s = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
+ vint16m2_t va_1 = __riscv_vwmul_vv_i16m2(q4_s, __riscv_vle8_v_i8m1(q8+32, vl), vl);
+ vint16m1_t aux2 = __riscv_vredsum_vs_i16m2_i16m1(va_1, vzero, vl);
+
+ sumf += d*scales[1]*__riscv_vmv_x_s_i16m1_i16(aux2);
+
+ }
+
+ *s = sumf;
+
+#else
+
+ uint8_t aux8[QK_K];
+ int16_t aux16[16];
+ float sums [8];
+ memset(sums, 0, 8*sizeof(float));
+
+ uint16_t s16[2];
+ const uint8_t * restrict scales = (const uint8_t *)s16;
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+ uint8_t * restrict a = aux8;
+ for (int l = 0; l < 32; ++l) a[l+ 0] = q4[l] & 0xF;
+ for (int l = 0; l < 32; ++l) a[l+32] = q4[l] >> 4;
+
+ const uint16_t * restrict b = (const uint16_t *)x[i].scales;
+ s16[0] = b[0] & 0x0f0f;
+ s16[1] = (b[0] >> 4) & 0x0f0f;
+
+ sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
+
+ for (int j = 0; j < QK_K/32; ++j) {
+ for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
+ q8 += 16; a += 16;
+ for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l];
+ q8 += 16; a += 16;
+ const float dl = d * scales[j];
+ for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l+8]);
+ }
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+#endif
+
+#if QK_K == 256
+void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q5_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+ static const uint32_t kmask1 = 0x3f3f3f3f;
+ static const uint32_t kmask2 = 0x0f0f0f0f;
+ static const uint32_t kmask3 = 0x03030303;
+
+ uint32_t utmp[4];
+
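+    // q5_K: the fifth bit of each quant lives in x[i].qh; every path below reconstructs it and merges it with the packed nibbles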
+#ifdef __ARM_NEON
+
+ const uint8x16_t m4b = vdupq_n_u8(0xf);
+ const uint8x16_t mone = vdupq_n_u8(1);
+ const uint8x16_t mtwo = vdupq_n_u8(2);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t mzero = vdupq_n_s32(0);
+#endif
+
+ int8x16x4_t q5bytes;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8);
+ const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
+ const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
+ vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
+ int32_t sumi_mins = vaddvq_s32(prod);
+
+ const uint8_t * scales = (const uint8_t *)utmp;
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ uint8x16x2_t qhbits = vld1q_u8_x2(qh);
+
+ uint8x16x4_t q5h;
+
+ int32_t sumi = 0;
+
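+        // per 64 quants: pull two high-bit planes out of qhbits, shift them up to bit 4, and OR them with the low/high nibbles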
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const uint8x16x2_t q5bits = vld1q_u8_x2(q5); q5 += 32;
+ const int8x16x4_t q8bytes = vld1q_s8_x4(q8); q8 += 64;
+
+ q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
+ q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
+ q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3);
+ q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3);
+ qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2);
+ qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2);
+
+ q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0]));
+ q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1]));
+ q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2]));
+ q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+
+ sumi += vaddvq_s32(vdotq_s32(vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++;
+ sumi += vaddvq_s32(vdotq_s32(vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++;
+#else
+
+ const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q5bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q5bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ sumi += vaddvq_s16(vaddq_s16(p0, p1)) * *scales++;
+
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q5bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q5bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ sumi += vaddvq_s16(vaddq_s16(p2, p3)) * *scales++;
+#endif
+ }
+
+ sumf += d * sumi - dmin * sumi_mins;
+
+ }
+
+ *s = sumf;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+ const __m128i mzero = _mm_setzero_si128();
+ const __m256i mone = _mm256_set1_epi8(1);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0.f;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+#if QK_K == 256
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+#else
+        // TODO (unreachable: this function is only compiled when QK_K == 256)
+ const float d = 0, dmin = 0;
+#endif
+
+ const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
+
+ const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
+ const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
+ const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
+ const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
+ summs += dmin * _mm_extract_epi32(hsum, 0);
+
+ const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
+ const __m256i scales = MM256_SET_M128I(sc128, sc128);
+
+ const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
+ __m256i hmask = mone;
+
+ __m256i sumi = _mm256_setzero_si256();
+
+ int bit = 0;
+
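+        // hmask selects one high-bit plane of hbits at a time; 'bit' shifts it down before it is re-positioned at bit 4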
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
+ const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
+
+ const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32;
+
+ const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
+ const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
+ const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0);
+ hmask = _mm256_slli_epi16(hmask, 1);
+
+ const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
+ const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
+ const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1);
+ hmask = _mm256_slli_epi16(hmask, 1);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+
+ __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0);
+ __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1);
+
+ p16_0 = _mm256_madd_epi16(scale_0, p16_0);
+ p16_1 = _mm256_madd_epi16(scale_1, p16_1);
+
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
+
+ }
+
+ __m256 vd = _mm256_set1_ps(d);
+ acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
+
+ }
+
+ *s = hsum_float_8(acc) + summs;
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i mzero = _mm_setzero_si128();
+ const __m128i mone = _mm_set1_epi8(1);
+ const __m128i m2 = _mm_set1_epi8(2);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0.f;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
+ const __m128i scales = _mm_cvtepu8_epi16(utmps);
+ const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
+
+ const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
+ const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
+ const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
+ const __m128i prod = _mm_madd_epi16(mins, q8s);
+ const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
+ summs += dmin * _mm_extract_epi32(hsum, 0);
+
+ const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]);
+ const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]);
+ __m128i hmask = mone;
+
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
+ int bit = 0;
+
+ __m128i shuffle = _mm_set1_epi16(0x0100);
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi16(shuffle, m2);
+
+ const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
+ const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
+
+ __m128i q5l_0 = _mm_and_si128(q5bits_0, m4);
+ __m128i q5l_1 = _mm_and_si128(q5bits_1, m4);
+ __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
+ __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
+ __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0);
+ __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1);
+ hmask = _mm_slli_epi16(hmask, 1);
+
+ __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0);
+ __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1);
+ p16_0 = _mm_madd_epi16(scale_0, p16_0);
+ p16_1 = _mm_madd_epi16(scale_0, p16_1);
+
+ q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4);
+ q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4);
+ q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
+ q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
+ q5_0 = _mm_add_epi8(q5l_0, q5h_0);
+ q5_1 = _mm_add_epi8(q5l_1, q5h_1);
+ hmask = _mm_slli_epi16(hmask, 1);
+
+ q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0);
+ __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1);
+ p16_2 = _mm_madd_epi16(scale_1, p16_2);
+ p16_3 = _mm_madd_epi16(scale_1, p16_3);
+
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
+
+ }
+
+ __m256 vd = _mm256_set1_ps(d);
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+ acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
+
+ }
+
+ *s = hsum_float_8(acc) + summs;
+
+#elif defined __riscv_v_intrinsic
+
+ const uint8_t * scales = (const uint8_t*)&utmp[0];
+ const uint8_t * mins = (const uint8_t*)&utmp[2];
+
+ float sumf = 0;
+ float sums = 0.0;
+
+ size_t vl;
+
+ for (int i = 0; i < nb; ++i) {
+
+ vl = 8;
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const uint8_t * restrict hm = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
+
+ vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
+ vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
+ vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
+ vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
+ vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
+
+ vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
+ sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
+
+ vl = 32;
+ int32_t aux32 = 0;
+ int is = 0;
+
+ uint8_t m = 1;
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+ vuint8m1_t vqh = __riscv_vle8_v_u8m1(hm, vl);
+
+ for (int j = 0; j < QK_K/64; ++j) {
+ // load Q5 and Q8
+ vuint8m1_t q5_x = __riscv_vle8_v_u8m1(q5, vl);
+ vint8m1_t q8_y1 = __riscv_vle8_v_i8m1(q8, vl);
+ vint8m1_t q8_y2 = __riscv_vle8_v_i8m1(q8+32, vl);
+
+            // build masks from qh and add 16 where the corresponding high bit is set
+ vint8m1_t q5_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q5_x, 0x0F, vl));
+ vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_1 = __riscv_vmsne_vx_u8m1_b8(qh_m1, 0, vl);
+ vint8m1_t q5_m1 = __riscv_vadd_vx_i8m1_m(vmask_1, q5_a, 16, vl);
+ m <<= 1;
+
+ vint8m1_t q5_l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q5_x, 0x04, vl));
+ vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_2 = __riscv_vmsne_vx_u8m1_b8(qh_m2, 0, vl);
+ vint8m1_t q5_m2 = __riscv_vadd_vx_i8m1_m(vmask_2, q5_l, 16, vl);
+ m <<= 1;
+
+ vint16m2_t v0 = __riscv_vwmul_vv_i16m2(q5_m1, q8_y1, vl);
+ vint16m2_t v1 = __riscv_vwmul_vv_i16m2(q5_m2, q8_y2, vl);
+
+ vint32m4_t vs1 = __riscv_vwmul_vx_i32m4(v0, scales[is++], vl);
+ vint32m4_t vs2 = __riscv_vwmul_vx_i32m4(v1, scales[is++], vl);
+
+ vint32m1_t vacc1 = __riscv_vredsum_vs_i32m4_i32m1(vs1, vzero, vl);
+ vint32m1_t vacc2 = __riscv_vredsum_vs_i32m4_i32m1(vs2, vzero, vl);
+
+ aux32 += __riscv_vmv_x_s_i32m1_i32(vacc1) + __riscv_vmv_x_s_i32m1_i32(vacc2);
+ q5 += 32; q8 += 64;
+
+ }
+
+ vfloat32m1_t vaux = __riscv_vfmul_vf_f32m1(__riscv_vfmv_v_f_f32m1(aux32, 1), d, 1);
+ sums += __riscv_vfmv_f_s_f32m1_f32(vaux);
+
+ }
+
+ *s = sumf+sums;
+
+#else
+
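+    // scalar reference: rebuild each 5-bit quant by adding 16 where the qh bit is set, then accumulate as in the q4_K path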
+ const uint8_t * scales = (const uint8_t*)&utmp[0];
+ const uint8_t * mins = (const uint8_t*)&utmp[2];
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].qs;
+ const uint8_t * restrict hm = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+ memset(aux32, 0, 8*sizeof(int32_t));
+ int8_t * restrict a = aux8;
+ uint8_t m = 1;
+ for (int j = 0; j < QK_K/64; ++j) {
+ for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
+ for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
+ a += 32; m <<= 1;
+ for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
+ for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
+ a += 32; m <<= 1;
+ q4 += 32;
+ }
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ int sumi = 0;
+ for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
+ a = aux8;
+ int is = 0;
+ for (int j = 0; j < QK_K/32; ++j) {
+ int32_t scale = scales[is++];
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
+ sumf -= dmin * sumi;
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+
+#else
+
+void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q5_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_NEON
+
+ const uint8x16_t m4b = vdupq_n_u8(0xf);
+ const uint8x16_t mh = vdupq_n_u8(16);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t mzero = vdupq_n_s32(0);
+#endif
+
+ int8x16x4_t q5bytes;
+ uint8x16x4_t q5h;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * (float)x[i].d;
+ const int8_t * sc = x[i].scales;
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint8x8_t qhbits = vld1_u8(qh);
+
+ const uint8x16x2_t q5bits = vld1q_u8_x2(q5);
+ const int8x16x4_t q8bytes = vld1q_s8_x4(q8);
+
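+        // vbic trick: q5h is 16 where the high bit is clear and 0 where it is set, so the subtraction yields (nibble + 16*h) - 16 in one step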
+ const uint8x16_t htmp = vcombine_u8(qhbits, vshr_n_u8(qhbits, 1));
+ q5h.val[0] = vbicq_u8(mh, vshlq_n_u8(htmp, 4));
+ q5h.val[1] = vbicq_u8(mh, vshlq_n_u8(htmp, 2));
+ q5h.val[2] = vbicq_u8(mh, htmp);
+ q5h.val[3] = vbicq_u8(mh, vshrq_n_u8(htmp, 2));
+
+ q5bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[0], m4b)), vreinterpretq_s8_u8(q5h.val[0]));
+ q5bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[1], m4b)), vreinterpretq_s8_u8(q5h.val[1]));
+ q5bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[0], 4)), vreinterpretq_s8_u8(q5h.val[2]));
+ q5bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[1], 4)), vreinterpretq_s8_u8(q5h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+
+ int32_t sumi1 = sc[0] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]));
+ int32_t sumi2 = sc[1] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[1], q8bytes.val[1]));
+ int32_t sumi3 = sc[2] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]));
+ int32_t sumi4 = sc[3] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[3], q8bytes.val[3]));
+
+ sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
+
+#else
+
+ const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q5bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q5bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ int32_t sumi = sc[0] * vaddvq_s16(p0) + sc[1] * vaddvq_s16(p1);
+
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q5bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q5bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ sumi += sc[2] * vaddvq_s16(p2) + sc[3] * vaddvq_s16(p3);
+
+ sumf += d*sumi;
+#endif
+
+ }
+
+ *s = sumf;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+ const __m256i mone = _mm256_set1_epi8(1);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
+
+ const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
+ const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));
+
+ int64_t aux64;
+ memcpy(&aux64, x[i].qh, 8);
+ const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64);
+ const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128);
+
+ const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4);
+ const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4);
+
+ const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
+ const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m256i p16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5l_0, q8_0));
+ const __m256i p16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5l_1, q8_1));
+ const __m256i s16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5h_0, q8_0));
+ const __m256i s16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5h_1, q8_1));
+
+ const __m256i dot = _mm256_sub_epi32(_mm256_add_epi32(p16_0, p16_1), _mm256_add_epi32(s16_0, s16_1));
+
+ acc = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(dot), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i mone = _mm_set1_epi8(1);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
+
+ const __m128i scale_0 = _mm_set1_epi16(x[i].scales[0]);
+ const __m128i scale_1 = _mm_set1_epi16(x[i].scales[1]);
+ const __m128i scale_2 = _mm_set1_epi16(x[i].scales[2]);
+ const __m128i scale_3 = _mm_set1_epi16(x[i].scales[3]);
+
+ int64_t aux64;
+ memcpy(&aux64, x[i].qh, 8);
+ const __m128i haux128_0 = _mm_set_epi64x(aux64 >> 1, aux64);
+ const __m128i haux128_1 = _mm_srli_epi16(haux128_0, 2);
+
+ const __m128i q5h_0 = _mm_slli_epi16(_mm_andnot_si128(haux128_0, mone), 4);
+ const __m128i q5h_1 = _mm_slli_epi16(_mm_andnot_si128(haux128_1, mone), 4);
+ const __m128i q5h_2 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_0, 4), mone), 4);
+ const __m128i q5h_3 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_1, 4), mone), 4);
+
+ const __m128i q5l_0 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 0), m4);
+ const __m128i q5l_1 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 1), m4);
+ const __m128i q5l_2 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 0), 4), m4);
+ const __m128i q5l_3 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 1), 4), m4);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m128i p16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5l_0, _mm256_extractf128_si256(q8_0, 0)));
+ const __m128i p16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5l_1, _mm256_extractf128_si256(q8_0, 1)));
+ const __m128i p16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5l_2, _mm256_extractf128_si256(q8_1, 0)));
+ const __m128i p16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5l_3, _mm256_extractf128_si256(q8_1, 1)));
+ const __m128i s16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5h_0, _mm256_extractf128_si256(q8_0, 0)));
+ const __m128i s16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5h_1, _mm256_extractf128_si256(q8_0, 1)));
+ const __m128i s16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5h_2, _mm256_extractf128_si256(q8_1, 0)));
+ const __m128i s16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5h_3, _mm256_extractf128_si256(q8_1, 1)));
+
+ const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2));
+ const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3));
+
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * (float)x[i].d;
+ const int8_t * sc = x[i].scales;
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+
+ // load qh
+ vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(qh, 8);
+ vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
+
+ size_t vl = 16;
+
+        // combine qh_x1 and qh_x2 into a single vector
+ vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
+
+ vuint8mf2_t qh_h0 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
+ vuint8mf2_t qh_h1 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), vl), 16, vl);
+ vuint8mf2_t qh_h2 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(qh_x, vl), 16, vl);
+ vuint8mf2_t qh_h3 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
+
+ vint8mf2_t qh_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h0);
+ vint8mf2_t qh_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h1);
+ vint8mf2_t qh_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h2);
+ vint8mf2_t qh_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h3);
+
+ // load q5
+ vuint8mf2_t q5_x1 = __riscv_vle8_v_u8mf2(q5, vl);
+ vuint8mf2_t q5_x2 = __riscv_vle8_v_u8mf2(q5+16, vl);
+
+ vint8mf2_t q5s_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x1, 0xF, vl));
+ vint8mf2_t q5s_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x2, 0xF, vl));
+ vint8mf2_t q5s_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x1, 0x4, vl));
+ vint8mf2_t q5s_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x2, 0x4, vl));
+
+ vint8mf2_t q5_0 = __riscv_vsub_vv_i8mf2(q5s_0, qh_0, vl);
+ vint8mf2_t q5_1 = __riscv_vsub_vv_i8mf2(q5s_1, qh_1, vl);
+ vint8mf2_t q5_2 = __riscv_vsub_vv_i8mf2(q5s_2, qh_2, vl);
+ vint8mf2_t q5_3 = __riscv_vsub_vv_i8mf2(q5s_3, qh_3, vl);
+
+        // load Q8 and multiply it by Q5
+ vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q5_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
+ vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q5_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
+ vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q5_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
+ vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q5_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
+
+ vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
+ vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
+ vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
+ vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
+
+ int32_t sumi1 = sc[0] * __riscv_vmv_x_s_i32m1_i32(vs_0);
+ int32_t sumi2 = sc[1] * __riscv_vmv_x_s_i32m1_i32(vs_1);
+ int32_t sumi3 = sc[2] * __riscv_vmv_x_s_i32m1_i32(vs_2);
+ int32_t sumi4 = sc[3] * __riscv_vmv_x_s_i32m1_i32(vs_3);
+
+ sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
+
+ }
+
+ *s = sumf;
+
+#else
+
+ int8_t aux8[QK_K];
+ int16_t aux16[16];
+ float sums [8];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].qs;
+ const uint8_t * restrict hm = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+ int8_t * restrict a = aux8;
+ for (int l = 0; l < 32; ++l) {
+ a[l+ 0] = q4[l] & 0xF;
+ a[l+32] = q4[l] >> 4;
+ }
+ for (int is = 0; is < 8; ++is) {
+ uint8_t m = 1 << is;
+ for (int l = 0; l < 8; ++l) a[8*is + l] -= (hm[l] & m ? 0 : 16);
+ }
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const int8_t * restrict sc = x[i].scales;
+
+ for (int j = 0; j < QK_K/16; ++j) {
+ const float dl = d * sc[j];
+ for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[8+l]);
+ q8 += 16; a += 16;
+ }
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+#endif
+
+#if QK_K == 256
+void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q6_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
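+    // q6_K: each quant combines 4 low bits from ql with 2 high bits from qh and is stored with a +32 offset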
+#ifdef __ARM_NEON
+
+ float sum = 0;
+
+ const uint8x16_t m4b = vdupq_n_u8(0xF);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+ //const int8x16_t m32s = vdupq_n_s8(32);
+
+ const uint8x16_t mone = vdupq_n_u8(3);
+
+ int8x16x4_t q6bytes;
+ uint8x16x4_t q6h;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d_all = GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q6 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const int8_t * restrict scale = x[i].scales;
+
+ const int16x8x2_t q8sums = vld1q_s16_x2(y[i].bsums);
+ const int8x16_t scales = vld1q_s8(scale);
+ const int16x8x2_t q6scales = {vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))};
+
+ const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])),
+ vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))),
+ vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])),
+ vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1]))));
+ int32_t isum_mins = vaddvq_s32(prod);
+
+ int32_t isum = 0;
+
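+        // the +32 offset is not removed per vector (see the commented vsubq lines); it is folded out after the loop as 32*isum_mins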
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ uint8x16x2_t qhbits = vld1q_u8_x2(qh); qh += 32;
+ uint8x16x4_t q6bits = vld1q_u8_x4(q6); q6 += 64;
+ int8x16x4_t q8bytes = vld1q_s8_x4(q8); q8 += 64;
+
+ q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
+ q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
+ uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2);
+ q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[1], 2);
+ q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+
+ //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
+ //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
+ //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s);
+ //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s);
+ q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0]));
+ q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1]));
+ q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2]));
+ q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+
+ isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
+ scale += 4;
+
+#else
+
+ int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1];
+ scale += 2;
+
+ int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1];
+ scale += 2;
+#endif
+
+ q8bytes = vld1q_s8_x4(q8); q8 += 64;
+
+ shifted = vshrq_n_u8(qhbits.val[0], 4);
+ q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[1], 4);
+ q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[0], 6);
+ q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[1], 6);
+ q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+
+ //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s);
+ //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s);
+ //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s);
+ //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s);
+ q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0]));
+ q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1]));
+ q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2]));
+ q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+
+ isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
+ scale += 4;
+
+ //for (int l = 0; l < 4; ++l) {
+ // const int32x4_t p = vdotq_s32(vzero, q6bytes.val[l], q8bytes.val[l]);
+ // isum += vaddvq_s32(p) * *scale++;
+ //}
+#else
+ p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1];
+ scale += 2;
+
+ p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1];
+ scale += 2;
+#endif
+
+ }
+ //sum += isum * d_all * y[i].d;
+ sum += d_all * y[i].d * (isum - 32 * isum_mins);
+
+ }
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+ const __m256i m2 = _mm256_set1_epi8(3);
+ const __m256i m32s = _mm256_set1_epi8(32);
+
+ __m256 acc = _mm256_setzero_ps();
+
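+    // the +32 offset is removed per step here: p16 = maddubs(q6, q8) - maddubs(32, q8)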
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
+
+ __m256i sumi = _mm256_setzero_si256();
+
+ int is = 0;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
+ const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
+ const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
+ const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
+ is += 4;
+
+ const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
+ const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
+ const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32;
+
+ const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4);
+ const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4);
+ const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4);
+ const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4);
+
+ const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
+ const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1);
+ const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2);
+ const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+
+ __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
+ __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
+ __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2);
+ __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3);
+
+ __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
+ __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
+ __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2);
+ __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3);
+
+ p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
+
+ p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
+ p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
+ p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2);
+ p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3);
+
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3));
+
+ }
+
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i m3 = _mm_set1_epi8(3);
+ const __m128i m32s = _mm_set1_epi8(32);
+ const __m128i m2 = _mm_set1_epi8(2);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
+
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
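+        // each shuffle pass picks two adjacent 8-bit scales (scale 2k in the low half, 2k+1 in the high half); m2 steps to the next pair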
+ __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
+ const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
+
+ const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
+ const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
+ const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
+ const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
+ const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
+ const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
+ const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
+ const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);
+
+ const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+ const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+ const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+ const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+
+ const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
+ const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
+ const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
+ const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
+ const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
+ const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
+ const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
+ const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);
+
+ const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+
+ __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
+ __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
+ __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
+ __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
+ __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
+ __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
+ __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
+ __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);
+
+ __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
+ __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
+ __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
+ __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3);
+ __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4);
+ __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5);
+ __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
+ __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);
+
+ p16_0 = _mm_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm_sub_epi16(p16_3, q8s_3);
+ p16_4 = _mm_sub_epi16(p16_4, q8s_4);
+ p16_5 = _mm_sub_epi16(p16_5, q8s_5);
+ p16_6 = _mm_sub_epi16(p16_6, q8s_6);
+ p16_7 = _mm_sub_epi16(p16_7, q8s_7);
+
+ const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi8(shuffle, m2);
+ const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi8(shuffle, m2);
+ const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi8(shuffle, m2);
+ const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi8(shuffle, m2);
+
+ p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
+ p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
+ p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
+ p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
+ p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
+ p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
+ p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
+ p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);
+
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7));
+
+ }
+
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+
+ const uint8_t * restrict q6 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const int8_t * restrict scale = x[i].scales;
+
+ size_t vl;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+
+ int sum_t = 0;
+ int is = 0;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ vl = 32;
+
+ // load qh
+ vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl);
+
+ // load Q6
+ vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl);
+ vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl);
+
+ vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl);
+ vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl);
+ vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl);
+ vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl);
+
+ vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl);
+ vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl);
+ vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl);
+ vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl);
+
+ vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl);
+ vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl);
+ vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl);
+ vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl);
+
+ vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl);
+ vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl);
+ vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl);
+ vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl);
+
+ // load Q8 and take product
+ vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl);
+ vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
+ vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
+ vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
+
+ vl = 16;
+
+ vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl);
+ vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl);
+ vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl);
+ vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl);
+ vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl);
+ vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl);
+ vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl);
+ vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl);
+
+ vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl);
+ vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl);
+ vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl);
+ vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl);
+
+ sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
+
+ q6 += 64; qh += 32; q8 += 128; is=8;
+
+ }
+
+ sumf += d * sum_t;
+
+ }
+
+ *s = sumf;
+
+#else
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+ memset(aux32, 0, 8*sizeof(int32_t));
+ int8_t * restrict a = aux8;
+ for (int j = 0; j < QK_K; j += 128) {
+ for (int l = 0; l < 32; ++l) {
+ a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
+ a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
+ a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
+ a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
+ }
+ a += 128;
+ q4 += 64;
+ qh += 32;
+ }
+ a = aux8;
+ int is = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int scale = x[i].scales[is++];
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+
+#else
+
+void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q6_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_NEON
+
+ float sum = 0;
+
+ const uint8x16_t m4b = vdupq_n_u8(0xF);
+ const int8x16_t m32s = vdupq_n_s8(32);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+
+ const uint8x16_t mone = vdupq_n_u8(3);
+
+ int8x16x4_t q6bytes;
+ uint8x16x4_t q6h;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d_all = (float)x[i].d;
+
+ const uint8_t * restrict q6 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const int8_t * restrict scale = x[i].scales;
+
+ int32_t isum = 0;
+
+ uint8x16_t qhbits = vld1q_u8(qh);
+ uint8x16x2_t q6bits = vld1q_u8_x2(q6);
+ int8x16x4_t q8bytes = vld1q_s8_x4(q8);
+
+ q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4);
+ uint8x16_t shifted = vshrq_n_u8(qhbits, 2);
+ q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits, 4);
+ q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits, 6);
+ q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+
+ q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
+ q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
+ q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s);
+ q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+
+ isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
+#else
+
+ int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1];
+
+ int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ isum += vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3];
+#endif
+
+ sum += isum * d_all * y[i].d;
+
+ }
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+ const __m256i m2 = _mm256_set1_epi8(3);
+ const __m256i m32s = _mm256_set1_epi8(32);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
+ const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
+ const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
+ const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
+
+ __m256i sumi = _mm256_setzero_si256();
+
+ const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
+ const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
+
+ const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
+ const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
+
+ const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
+ const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);
+
+ const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
+ const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
+ __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
+
+ __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
+ __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
+
+ p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
+
+ p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
+ p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
+
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
+
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i m2 = _mm_set1_epi8(3);
+ const __m128i m32s = _mm_set1_epi8(32);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
+ const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
+ const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
+ const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
+
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
+ const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
+ const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
+
+ const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
+ const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
+
+ const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4);
+ const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4);
+ const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4);
+ const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4);
+
+ const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0);
+ const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1);
+ const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2);
+ const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0));
+ __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1));
+ __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0));
+ __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1));
+
+ __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
+ __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
+ __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
+ __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
+
+ p16_0 = _mm_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm_sub_epi16(p16_3, q8s_3);
+
+ p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
+ p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
+ p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
+ p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
+
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
+
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc);
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d_all = (float)x[i].d;
+
+ const uint8_t * restrict q6 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const int8_t * restrict scale = x[i].scales;
+
+ int32_t isum = 0;
+
+ size_t vl = 16;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+
+ // load Q6
+ vuint8mf2_t q6_0 = __riscv_vle8_v_u8mf2(q6, vl);
+ vuint8mf2_t q6_1 = __riscv_vle8_v_u8mf2(q6+16, vl);
+
+ // load qh
+ vuint8mf2_t qh_x = __riscv_vle8_v_u8mf2(qh, vl);
+
+ vuint8mf2_t qh0 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
+ qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
+ vuint8mf2_t qh1 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
+ qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
+ vuint8mf2_t qh2 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
+ qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
+ vuint8mf2_t qh3 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
+
+ vuint8mf2_t q6h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_0, 0xF, vl), qh0, vl);
+ vuint8mf2_t q6h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_1, 0xF, vl), qh1, vl);
+ vuint8mf2_t q6h_2 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_0, 0x4, vl), qh2, vl);
+ vuint8mf2_t q6h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_1, 0x4, vl), qh3, vl);
+
+ vint8mf2_t q6v_0 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_0), 32, vl);
+ vint8mf2_t q6v_1 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_1), 32, vl);
+ vint8mf2_t q6v_2 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_2), 32, vl);
+ vint8mf2_t q6v_3 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_3), 32, vl);
+
+ // load Q8 and take product
+ vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q6v_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
+ vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q6v_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
+ vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q6v_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
+ vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q6v_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
+
+ vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
+ vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
+ vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
+ vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
+
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scale[0];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scale[1];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scale[2];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scale[3];
+
+ sumf += isum * d_all * y[i].d;
+
+ }
+
+ *s = sumf;
+
+#else
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+ memset(aux32, 0, 8*sizeof(int32_t));
+ int8_t * restrict a = aux8;
+ for (int l = 0; l < 16; ++l) {
+ a[l+ 0] = (int8_t)((q4[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
+ a[l+16] = (int8_t)((q4[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
+ a[l+32] = (int8_t)((q4[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
+ a[l+48] = (int8_t)((q4[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
+ }
+ int is = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int scale = x[i].scales[is++];
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+
+#endif
--- /dev/null
+#pragma once
+
+#include "ggml-impl.h"
+
+// GGML internal header
+
+#include <stdint.h>
+#include <stddef.h>
+
+#define QK4_0 32
+typedef struct {
+ ggml_fp16_t d; // delta
+ uint8_t qs[QK4_0 / 2]; // nibbles / quants
+} block_q4_0;
+static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
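+
+// For reference, given these definitions: a q4_0 block packs 32 weights into
+// 18 bytes (one fp16 delta plus 16 nibble bytes), i.e. 4.5 bits per weight.
+// A minimal decode sketch (illustrative only, not used elsewhere; the first half
+// of the block comes from the low nibbles, the second half from the high nibbles):
+//
+//     static inline void dequantize_block_q4_0_sketch(const block_q4_0 * b, float * y) {
+//         const float d = GGML_FP16_TO_FP32(b->d);
+//         for (int j = 0; j < QK4_0/2; ++j) {
+//             y[j          ] = d * ((b->qs[j] & 0x0F) - 8);
+//             y[j + QK4_0/2] = d * ((b->qs[j] >>   4) - 8);
+//         }
+//     }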
+
+#define QK4_1 32
+typedef struct {
+ ggml_fp16_t d; // delta
+ ggml_fp16_t m; // min
+ uint8_t qs[QK4_1 / 2]; // nibbles / quants
+} block_q4_1;
+static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding");
+
+#define QK5_0 32
+typedef struct {
+ ggml_fp16_t d; // delta
+    uint8_t qh[4];          // 5th bit of quants
+ uint8_t qs[QK5_0 / 2]; // nibbles / quants
+} block_q5_0;
+static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
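+
+// For reference: reading qh as a little-endian uint32, bit j holds the 5th bit of
+// quant j. A sketch of recovering weight j (illustrative only; b is a block pointer
+// and qh its high bits loaded into a uint32_t):
+//
+//     const int   q = ((b->qs[j % 16] >> 4*(j / 16)) & 0x0F) | (((qh >> j) & 1) << 4); // q in [0, 32)
+//     const float x = GGML_FP16_TO_FP32(b->d) * (q - 16);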
+
+#define QK5_1 32
+typedef struct {
+ ggml_fp16_t d; // delta
+ ggml_fp16_t m; // min
+    uint8_t qh[4];          // 5th bit of quants
+ uint8_t qs[QK5_1 / 2]; // nibbles / quants
+} block_q5_1;
+static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
+
+#define QK8_0 32
+typedef struct {
+ ggml_fp16_t d; // delta
+ int8_t qs[QK8_0]; // quants
+} block_q8_0;
+static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
+
+#define QK8_1 32
+typedef struct {
+ float d; // delta
+ float s; // d * sum(qs[i])
+ int8_t qs[QK8_1]; // quants
+} block_q8_1;
+static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
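+
+// The precomputed s term lets the q4_1/q5_1 dot products fold in the per-block min
+// without touching individual quants: with x = d_x*q_x + m_x and y = d_y*q_y,
+//
+//     dot(x, y) = d_x*d_y * sum(q_x[i]*q_y[i]) + m_x * sum(y[i])
+//               = d_x*d_y * sum(q_x[i]*q_y[i]) + m_x * s_y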
+
+//
+// Super-block quantization structures
+//
+
+// Super-block size
+#ifdef GGML_QKK_64
+#define QK_K 64
+#define K_SCALE_SIZE 4
+#else
+#define QK_K 256
+#define K_SCALE_SIZE 12
+#endif
+
+// 2-bit quantization
+// weight is represented as x = a * q + b
+// 16 blocks of 16 elements each
+// Effectively 2.625 bits per weight
+typedef struct {
+ uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
+ uint8_t qs[QK_K/4]; // quants
+ ggml_fp16_t d; // super-block scale for quantized scales
+ ggml_fp16_t dmin; // super-block scale for quantized mins
+} block_q2_K;
+static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");
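+
+// Size check, with QK_K == 256: 16 scale/min bytes + 64 quant bytes + two fp16
+// super-block scales per 256 weights, i.e. (16 + 64 + 4)*8/256 = 2.625 bits per weight.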
+
+// 3-bit quantization
+// weight is represented as x = a * q
+// 16 blocks of 16 elements each
+// Effectively 3.4375 bits per weight
+#ifdef GGML_QKK_64
+typedef struct {
+ uint8_t hmask[QK_K/8]; // quants - high bit
+ uint8_t qs[QK_K/4]; // quants - low 2 bits
+ uint8_t scales[2];
+ ggml_fp16_t d; // super-block scale
+} block_q3_K;
+static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 2, "wrong q3_K block size/padding");
+#else
+typedef struct {
+ uint8_t hmask[QK_K/8]; // quants - high bit
+ uint8_t qs[QK_K/4]; // quants - low 2 bits
+ uint8_t scales[12]; // scales, quantized with 6 bits
+ ggml_fp16_t d; // super-block scale
+} block_q3_K;
+static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding");
+#endif
+
+// 4-bit quantization
+// 8 blocks of 32 elements each
+// weight is represented as x = a * q + b
+// Effectively 4.5 bits per weight
+#ifdef GGML_QKK_64
+typedef struct {
+ ggml_fp16_t d[2]; // super-block scales/mins
+ uint8_t scales[2]; // 4-bit block scales/mins
+    uint8_t qs[QK_K/2];        // 4-bit quants
+} block_q4_K;
+static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + QK_K/2 + 2, "wrong q4_K block size/padding");
+#else
+typedef struct {
+ ggml_fp16_t d; // super-block scale for quantized scales
+ ggml_fp16_t dmin; // super-block scale for quantized mins
+ uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
+    uint8_t qs[QK_K/2];        // 4-bit quants
+} block_q4_K;
+static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding");
+#endif
+
+// 5-bit quantization
+// 8 blocks of 32 elements each
+// weight is represented as x = a * q + b
+// Effectively 5.5 bits per weight
+#ifdef GGML_QKK_64
+typedef struct {
+ ggml_fp16_t d; // super-block scale
+ int8_t scales[QK_K/16]; // 8-bit block scales
+ uint8_t qh[QK_K/8]; // quants, high bit
+ uint8_t qs[QK_K/2]; // quants, low 4 bits
+} block_q5_K;
+static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding");
+#else
+typedef struct {
+ ggml_fp16_t d; // super-block scale for quantized scales
+ ggml_fp16_t dmin; // super-block scale for quantized mins
+ uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
+ uint8_t qh[QK_K/8]; // quants, high bit
+ uint8_t qs[QK_K/2]; // quants, low 4 bits
+} block_q5_K;
+static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
+#endif
+
+// 6-bit quantization
+// weight is represented as x = a * q
+// 16 blocks of 16 elements each
+// Effectively 6.5625 bits per weight
+typedef struct {
+ uint8_t ql[QK_K/2]; // quants, lower 4 bits
+ uint8_t qh[QK_K/4]; // quants, upper 2 bits
+ int8_t scales[QK_K/16]; // scales, quantized with 8 bits
+ ggml_fp16_t d; // super-block scale
+} block_q6_K;
+static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding");
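+
+// Decode sketch (illustrative only, QK_K == 256): within the first 128 weights of a
+// super-block, weight j combines a nibble of ql with a 2-bit field of qh, re-centered
+// by -32; this is the same unpacking the ggml_vec_dot_q6_K_q8_K kernels perform in
+// registers (ql/qh/scales advance by 64/32/8 for the second 128 weights):
+//
+//     const int8_t q = (int8_t)(((ql[j % 64] >> 4*(j / 64)) & 0x0F) | (((qh[j % 32] >> 2*(j / 32)) & 3) << 4)) - 32;
+//     const float  x = GGML_FP16_TO_FP32(d) * scales[j / 16] * q;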
+
+// This is only used for intermediate quantization and dot products
+typedef struct {
+ float d; // delta
+ int8_t qs[QK_K]; // quants
+ int16_t bsums[QK_K/16]; // sum of quants in groups of 16
+} block_q8_K;
+static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding");
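+
+// bsums caches the sum of each group of 16 quants so that the k-quant dot products
+// (e.g. q2_K/q4_K/q5_K against q8_K) can apply the per-block mins as a few
+// multiply-adds against these sums instead of re-reducing the activations.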
+
+
+// Quantization
+void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k);
+void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k);
+void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k);
+void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k);
+void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k);
+void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k);
+
+void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k);
+void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k);
+void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k);
+void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k);
+void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k);
+void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k);
+
+void quantize_row_q4_0(const float * restrict x, void * restrict y, int k);
+void quantize_row_q4_1(const float * restrict x, void * restrict y, int k);
+void quantize_row_q5_0(const float * restrict x, void * restrict y, int k);
+void quantize_row_q5_1(const float * restrict x, void * restrict y, int k);
+void quantize_row_q8_0(const float * restrict x, void * restrict y, int k);
+void quantize_row_q8_1(const float * restrict x, void * restrict y, int k);
+
+void quantize_row_q2_K(const float * restrict x, void * restrict y, int k);
+void quantize_row_q3_K(const float * restrict x, void * restrict y, int k);
+void quantize_row_q4_K(const float * restrict x, void * restrict y, int k);
+void quantize_row_q5_K(const float * restrict x, void * restrict y, int k);
+void quantize_row_q6_K(const float * restrict x, void * restrict y, int k);
+void quantize_row_q8_K(const float * restrict x, void * restrict y, int k);
+
+// Dequantization
+void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k);
+void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k);
+void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k);
+void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k);
+void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k);
+//void dequantize_row_q8_1(const block_q8_1 * restrict x, float * restrict y, int k);
+
+void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k);
+void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k);
+void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k);
+void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k);
+void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k);
+void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k);
+
+// Dot product
+void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+
+void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
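+
+// Putting the pieces together, a minimal usage sketch (illustrative buffers, assumed
+// to hold QK_K floats each): quantize a row of weights to q6_K and the activations
+// to q8_K, then take the dot product of the two quantized rows:
+//
+//     float w[QK_K], a[QK_K], dot;
+//     block_q6_K wq;
+//     block_q8_K aq;
+//     quantize_row_q6_K(w, &wq, QK_K);
+//     quantize_row_q8_K(a, &aq, QK_K);
+//     ggml_vec_dot_q6_K_q8_K(QK_K, &dot, &wq, &aq);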
params.seed = std::stoi(get_next_arg(i, argc, argv, arg, params));
} else if (arg == "-t" || arg == "--threads") {
params.n_threads = std::stoi(get_next_arg(i, argc, argv, arg, params));
- } else if (arg == "-ngl" || arg == "--gpu-layers" || arg == "--n-gpu-layers") {
- params.n_gpu_layers = std::stoi(get_next_arg(i, argc, argv, arg, params));
} else if (arg == "-p" || arg == "--prompt") {
params.prompt = get_next_arg(i, argc, argv, arg, params);
} else if (arg == "-n" || arg == "--n_predict") {
params.n_predict = std::stoi(get_next_arg(i, argc, argv, arg, params));
+ } else if (arg == "-np" || arg == "--n_parallel") {
+ params.n_parallel = std::stoi(get_next_arg(i, argc, argv, arg, params));
} else if (arg == "--top_k") {
params.top_k = std::stoi(get_next_arg(i, argc, argv, arg, params));
} else if (arg == "--top_p") {
params.repeat_penalty = std::stof(get_next_arg(i, argc, argv, arg, params));
} else if (arg == "-b" || arg == "--batch_size") {
params.n_batch= std::stoi(get_next_arg(i, argc, argv, arg, params));
+ } else if (arg == "-c" || arg == "--context") {
+ params.n_ctx= std::stoi(get_next_arg(i, argc, argv, arg, params));
+ } else if (arg == "-ngl" || arg == "--gpu-layers" || arg == "--n-gpu-layers") {
+ params.n_gpu_layers = std::stoi(get_next_arg(i, argc, argv, arg, params));
+ } else if (arg == "--ignore-eos") {
+ params.ignore_eos = true;
} else if (arg == "-m" || arg == "--model") {
params.model = get_next_arg(i, argc, argv, arg, params);
} else if (arg == "-i" || arg == "--interactive") {
fprintf(stderr, " -h, --help show this help message and exit\n");
fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1)\n");
fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
- fprintf(stderr, " -ngl N, --gpu-layers N number of layers to offload to GPU on supported models (default: %d)\n", params.n_gpu_layers);
fprintf(stderr, " -p PROMPT, --prompt PROMPT\n");
fprintf(stderr, " prompt to start generation with (default: random)\n");
fprintf(stderr, " -f FNAME, --file FNAME\n");
fprintf(stderr, " --repeat-last-n N last n tokens to consider for penalize (default: %d, 0 = disabled)\n", params.repeat_last_n);
fprintf(stderr, " --repeat-penalty N penalize repeat sequence of tokens (default: %.2f, 1.0 = disabled)\n", (double)params.repeat_penalty);
fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch);
+ fprintf(stderr, " -c N, --context N context / KV cache size (default: %d)\n", params.n_ctx);
+ fprintf(stderr, " --ignore-eos ignore EOS token during generation\n");
+ fprintf(stderr, " -ngl N, --gpu-layers N number of layers to offload to GPU on supported models (default: %d)\n", params.n_gpu_layers);
fprintf(stderr, " -m FNAME, --model FNAME\n");
fprintf(stderr, " model path (default: %s)\n", params.model.c_str());
fprintf(stderr, "\n");
//
struct gpt_params {
- int32_t seed = -1; // RNG seed
- int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
- int32_t n_predict = 200; // new tokens to predict
- int32_t n_batch = 8; // batch size for prompt processing
+ int32_t seed = -1; // RNG seed
+ int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
+ int32_t n_predict = 200; // new tokens to predict
+ int32_t n_parallel = 1; // number of parallel streams
+ int32_t n_batch = 8; // batch size for prompt processing
+ int32_t n_ctx = 2048; // context size (this is the KV cache max size)
+    int32_t n_gpu_layers = 0;     // number of layers to offload to the GPU
+
+ bool ignore_eos = false; // ignore EOS token when generating text
// sampling parameters
int32_t top_k = 40;
bool interactive = false;
int32_t interactive_port = -1;
-
- int32_t n_gpu_layers = 0;
};
bool gpt_params_parse(int argc, char ** argv, gpt_params & params);
# TODO: this is temporary
# need to export ggml symbols for MSVC, but too lazy ..
- add_executable(${TARGET} talk-llama.cpp llama.cpp ../common.cpp ../common-sdl.cpp ../../ggml.c ../../ggml-alloc.c ../../whisper.cpp)
+ add_executable(${TARGET}
+ talk-llama.cpp
+ llama.cpp
+ ../common.cpp
+ ../common-sdl.cpp
+ ../../ggml.c
+ ../../ggml-alloc.c
+ ../../whisper.cpp)
target_include_directories(${TARGET} PRIVATE ${SDL2_INCLUDE_DIRS} ../../)
target_link_libraries(${TARGET} PRIVATE ${SDL2_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
+#define LLAMA_API_INTERNAL
#include "llama.h"
+#include "unicode.h"
+
#include "ggml.h"
#include "ggml-alloc.h"
#ifdef GGML_USE_MPI
# include "ggml-mpi.h"
#endif
-#ifdef GGML_USE_K_QUANTS
-# ifndef QK_K
-# ifdef GGML_QKK_64
-# define QK_K 64
-# else
-# define QK_K 256
-# endif
+#ifndef QK_K
+# ifdef GGML_QKK_64
+# define QK_K 64
+# else
+# define QK_K 256
# endif
#endif
#include <cassert>
#include <cinttypes>
#include <climits>
+#include <cmath>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <ctime>
+#include <forward_list>
#include <fstream>
+#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <queue>
#include <random>
#include <regex>
+#include <set>
#include <sstream>
#include <thread>
#include <unordered_map>
//
LLAMA_ATTRIBUTE_FORMAT(2, 3)
-static void llama_log_internal (llama_log_level level, const char* format, ...);
-static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data);
+static void llama_log_internal (ggml_log_level level, const char* format, ...);
+static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);
-#define LLAMA_LOG_INFO(...) llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__)
-#define LLAMA_LOG_WARN(...) llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__)
-#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)
+#define LLAMA_LOG_INFO(...) llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
+#define LLAMA_LOG_WARN(...) llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
+#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
//
// helpers
return lookup[highbits];
}
-void replace_all(std::string & s, const std::string & search, const std::string & replace) {
+static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
std::string result;
for (size_t pos = 0; ; pos += search.length()) {
auto new_pos = s.find(search, pos);
}
s = std::move(result);
}
+
+static bool is_float_close(float a, float b, float abs_tol) {
+ // Check for non-negative tolerance
+ if (abs_tol < 0.0) {
+ throw std::invalid_argument("Tolerance must be non-negative");
+ }
+
+ // Exact equality check
+ if (a == b) {
+ return true;
+ }
+
+ // Check for infinities
+ if (std::isinf(a) || std::isinf(b)) {
+ return false;
+ }
+
+ // Regular comparison using the provided absolute tolerance
+ return std::fabs(b - a) <= abs_tol;
+}
+
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif
LLM_ARCH_GPTJ,
LLM_ARCH_GPTNEOX,
LLM_ARCH_MPT,
+ LLM_ARCH_STARCODER,
+ LLM_ARCH_PERSIMMON,
+ LLM_ARCH_REFACT,
+ LLM_ARCH_BLOOM,
LLM_ARCH_UNKNOWN,
};
static std::map<llm_arch, std::string> LLM_ARCH_NAMES = {
- { LLM_ARCH_LLAMA, "llama" },
- { LLM_ARCH_FALCON, "falcon" },
- { LLM_ARCH_GPT2, "gpt2" },
- { LLM_ARCH_GPTJ, "gptj" },
- { LLM_ARCH_GPTNEOX, "gptneox" },
- { LLM_ARCH_MPT, "mpt" },
- { LLM_ARCH_BAICHUAN,"baichuan" },
+ { LLM_ARCH_LLAMA, "llama" },
+ { LLM_ARCH_FALCON, "falcon" },
+ { LLM_ARCH_GPT2, "gpt2" },
+ { LLM_ARCH_GPTJ, "gptj" },
+ { LLM_ARCH_GPTNEOX, "gptneox" },
+ { LLM_ARCH_MPT, "mpt" },
+ { LLM_ARCH_BAICHUAN, "baichuan" },
+ { LLM_ARCH_STARCODER, "starcoder" },
+ { LLM_ARCH_PERSIMMON, "persimmon" },
+ { LLM_ARCH_REFACT, "refact" },
+ { LLM_ARCH_BLOOM, "bloom" },
};
enum llm_kv {
LLM_KV_ROPE_DIMENSION_COUNT,
LLM_KV_ROPE_FREQ_BASE,
LLM_KV_ROPE_SCALE_LINEAR,
+ LLM_KV_ROPE_SCALING_TYPE,
+ LLM_KV_ROPE_SCALING_FACTOR,
+ LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
+ LLM_KV_ROPE_SCALING_FINETUNED,
LLM_KV_TOKENIZER_MODEL,
LLM_KV_TOKENIZER_LIST,
};
static std::map<llm_kv, std::string> LLM_KV_NAMES = {
- { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
- { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
- { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
- { LLM_KV_GENERAL_NAME, "general.name" },
- { LLM_KV_GENERAL_AUTHOR, "general.author" },
- { LLM_KV_GENERAL_URL, "general.url" },
- { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
- { LLM_KV_GENERAL_LICENSE, "general.license" },
- { LLM_KV_GENERAL_SOURCE_URL, "general.source_url" },
- { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source_hf_repo" },
+ { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
+ { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
+ { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
+ { LLM_KV_GENERAL_NAME, "general.name" },
+ { LLM_KV_GENERAL_AUTHOR, "general.author" },
+ { LLM_KV_GENERAL_URL, "general.url" },
+ { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
+ { LLM_KV_GENERAL_LICENSE, "general.license" },
+ { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" },
+ { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" },
{ LLM_KV_CONTEXT_LENGTH, "%s.context_length" },
{ LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" },
{ LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
{ LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },
- { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
- { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
- { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
+ { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
+ { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
+ { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
+ { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" },
+ { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" },
+ { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" },
+ { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" },
{ LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
{ LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" },
enum llm_tensor {
LLM_TENSOR_TOKEN_EMBD,
+ LLM_TENSOR_TOKEN_EMBD_NORM,
LLM_TENSOR_POS_EMBD,
LLM_TENSOR_OUTPUT,
LLM_TENSOR_OUTPUT_NORM,
LLM_TENSOR_FFN_DOWN,
LLM_TENSOR_FFN_UP,
LLM_TENSOR_FFN_NORM,
+ LLM_TENSOR_ATTN_Q_NORM,
+ LLM_TENSOR_ATTN_K_NORM,
};
static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
},
},
+ {
+ LLM_ARCH_PERSIMMON,
+ {
+            { LLM_TENSOR_TOKEN_EMBD,    "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM,   "output_norm" },
+            { LLM_TENSOR_OUTPUT,        "output" },
+            { LLM_TENSOR_ATTN_NORM,     "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_QKV,      "blk.%d.attn_qkv" },
+            { LLM_TENSOR_ATTN_OUT,      "blk.%d.attn_output" },
+            { LLM_TENSOR_ATTN_Q_NORM,   "blk.%d.attn_q_norm" },
+            { LLM_TENSOR_ATTN_K_NORM,   "blk.%d.attn_k_norm" },
+            { LLM_TENSOR_FFN_NORM,      "blk.%d.ffn_norm" },
+            { LLM_TENSOR_FFN_DOWN,      "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP,        "blk.%d.ffn_up" },
+            { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+ },
+ },
{
LLM_ARCH_MPT,
{
{ LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
+ {
+ LLM_ARCH_STARCODER,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_POS_EMBD, "position_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ },
+ },
+ {
+ LLM_ARCH_REFACT,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
+ {
+ LLM_ARCH_BLOOM,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
},
},
{
//
#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
-{ \
+do { \
const std::string skey(key); \
const int kid = gguf_find_key(ctx, skey.c_str()); \
if (kid >= 0) { \
} else if (req) { \
throw std::runtime_error(format("key not found in model: %s", skey.c_str())); \
} \
+} while (0)
+
+static std::map<int8_t, std::string> LLAMA_ROPE_SCALING_TYPES = {
+ { LLAMA_ROPE_SCALING_NONE, "none" },
+ { LLAMA_ROPE_SCALING_LINEAR, "linear" },
+ { LLAMA_ROPE_SCALING_YARN, "yarn" },
+};
+
+static int8_t llama_rope_scaling_type_from_string(const std::string & name) {
+ for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
+ if (kv.second == name) {
+ return kv.first;
+ }
+ }
+
+ return LLAMA_ROPE_SCALING_UNSPECIFIED;
}
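+
+// e.g. llama_rope_scaling_type_from_string("yarn") == LLAMA_ROPE_SCALING_YARN, while
+// any unrecognized name falls back to LLAMA_ROPE_SCALING_UNSPECIFIED.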
//
typedef void (*offload_func_t)(struct ggml_tensor * tensor);
-static void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
+static void ggml_offload_nop(struct ggml_tensor * tensor) {
(void) tensor;
}
-static std::string llama_token_to_str(const struct llama_context * ctx, llama_token token) {
+static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
std::vector<char> result(8, 0);
- const int n_tokens = llama_token_to_piece(ctx, token, result.data(), result.size());
+ const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
if (n_tokens < 0) {
result.resize(-n_tokens);
- int check = llama_token_to_piece(ctx, token, result.data(), result.size());
+ int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
GGML_ASSERT(check == -n_tokens);
- } else {
+ }
+ else {
result.resize(n_tokens);
}
struct llama_state {
// We save the log callback globally
- llama_log_callback log_callback = llama_log_callback_default;
+ ggml_log_callback log_callback = llama_log_callback_default;
void * log_callback_user_data = nullptr;
};
// available llama models
enum e_model {
MODEL_UNKNOWN,
+ MODEL_1B,
MODEL_3B,
MODEL_7B,
+ MODEL_8B,
MODEL_13B,
+ MODEL_15B,
MODEL_30B,
MODEL_34B,
MODEL_40B,
};
static const size_t kB = 1024;
-static const size_t MB = kB*kB;
+static const size_t MB = 1024*kB;
+static const size_t GB = 1024*MB;
-// default hparams (LLaMA 7B)
struct llama_hparams {
- uint32_t n_vocab = 32000;
- uint32_t n_ctx_train = 2048; // the context size used during training
- uint32_t n_ctx = 512; // the context size used during inference
- uint32_t n_embd = 4096;
- uint32_t n_head = 32;
- uint32_t n_head_kv = 32;
- uint32_t n_layer = 32;
- uint32_t n_rot = 64;
- uint32_t n_ff = 11008;
-
- float f_norm_eps = 1e-5;
- float f_norm_rms_eps = 1e-5;
-
- float rope_freq_base = 10000.0f;
- float rope_freq_scale = 1.0f;
+ bool vocab_only;
+ uint32_t n_vocab;
+ uint32_t n_ctx_train; // context size the model was trained on
+ uint32_t n_embd;
+ uint32_t n_head;
+ uint32_t n_head_kv;
+ uint32_t n_layer;
+ uint32_t n_rot;
+ uint32_t n_ff;
+
+ float f_norm_eps;
+ float f_norm_rms_eps;
+
+ float rope_freq_base_train;
+ float rope_freq_scale_train;
+ uint32_t n_yarn_orig_ctx;
+ int8_t rope_scaling_type_train : 3;
+ bool rope_finetuned : 1;
+
+ float f_clamp_kqv;
+ float f_max_alibi_bias;
bool operator!=(const llama_hparams & other) const {
- return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT
+ if (this->vocab_only != other.vocab_only) return true;
+ if (this->n_vocab != other.n_vocab) return true;
+ if (this->n_ctx_train != other.n_ctx_train) return true;
+ if (this->n_embd != other.n_embd) return true;
+ if (this->n_head != other.n_head) return true;
+ if (this->n_head_kv != other.n_head_kv) return true;
+ if (this->n_layer != other.n_layer) return true;
+ if (this->n_rot != other.n_rot) return true;
+ if (this->n_ff != other.n_ff) return true;
+ if (this->rope_finetuned != other.rope_finetuned) return true;
+ if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true;
+
+ const float EPSILON = 1e-9;
+
+ if (!is_float_close(this->f_norm_eps, other.f_norm_eps, EPSILON)) return true;
+ if (!is_float_close(this->f_norm_rms_eps, other.f_norm_rms_eps, EPSILON)) return true;
+ if (!is_float_close(this->rope_freq_base_train, other.rope_freq_base_train, EPSILON)) return true;
+ if (!is_float_close(this->rope_freq_scale_train, other.rope_freq_scale_train, EPSILON)) return true;
+
+ return false;
}
uint32_t n_gqa() const {
uint32_t n_embd_gqa() const {
return n_embd/n_gqa();
}
+};
- size_t kv_size() const {
- size_t result = 2ull;
- result *= (size_t) n_embd_gqa();
- result *= (size_t) n_ctx;
- result *= (size_t) n_layer;
- result *= sizeof(ggml_fp16_t);
- return result;
- }
+struct llama_cparams {
+ uint32_t n_ctx; // context size used during inference
+ uint32_t n_batch;
+ uint32_t n_threads; // number of threads to use for generation
+ uint32_t n_threads_batch; // number of threads to use for batch processing
+
+ float rope_freq_base;
+ float rope_freq_scale;
+
+ uint32_t n_yarn_orig_ctx;
+ // These hyperparameters are not exposed in GGUF, because all
+ // existing YaRN models use the same values for them.
+ float yarn_ext_factor;
+ float yarn_attn_factor;
+ float yarn_beta_fast;
+ float yarn_beta_slow;
+
+ bool mul_mat_q;
};
struct llama_layer {
struct ggml_tensor * attn_norm_b;
struct ggml_tensor * attn_norm_2;
struct ggml_tensor * attn_norm_2_b;
+ struct ggml_tensor * attn_q_norm;
+ struct ggml_tensor * attn_q_norm_b;
+ struct ggml_tensor * attn_k_norm;
+ struct ggml_tensor * attn_k_norm_b;
// attention
struct ggml_tensor * wq;
struct ggml_tensor * wo;
struct ggml_tensor * wqkv;
+ // attention bias
+ struct ggml_tensor * bo;
+ struct ggml_tensor * bqkv;
+
// normalization
struct ggml_tensor * ffn_norm;
+ struct ggml_tensor * ffn_norm_b;
// ff
- struct ggml_tensor * w1; // ffn_gate
- struct ggml_tensor * w2; // ffn_down
- struct ggml_tensor * w3; // ffn_up
+ struct ggml_tensor * ffn_gate; // w1
+ struct ggml_tensor * ffn_down; // w2
+ struct ggml_tensor * ffn_up; // w3
+
+ // ff bias
+ struct ggml_tensor * ffn_down_b; // b2
+ struct ggml_tensor * ffn_up_b; // b3
+};
+
+struct llama_kv_cell {
+ llama_pos pos = -1;
+ llama_pos delta = 0;
+
+ std::set<llama_seq_id> seq_id;
+
+ bool has_seq_id(const llama_seq_id & id) const {
+ return seq_id.find(id) != seq_id.end();
+ }
};
+// ring-buffer of cached KV data
struct llama_kv_cache {
+ bool has_shift = false;
+
+ // Note: The value of head isn't only used to optimize searching
+ // for a free KV slot. llama_decode_internal also uses it, so it
+ // cannot be freely changed after a slot has been allocated.
+ uint32_t head = 0;
+ uint32_t size = 0;
+
+ // computed before each graph build
+ uint32_t n = 0;
+
+ std::vector<llama_kv_cell> cells;
+
struct ggml_tensor * k = NULL;
struct ggml_tensor * v = NULL;
llama_buffer buf;
- int n; // number of tokens currently in the cache
-
~llama_kv_cache() {
if (ctx) {
ggml_free(ctx);
std::unordered_map<token, id> token_to_id;
std::vector<token_data> id_to_token;
+ std::unordered_map<token, id> special_tokens_cache;
+
std::map<std::pair<std::string, std::string>, int> bpe_ranks;
// default LLaMA special tokens
id special_sep_id = -1;
id special_pad_id = -1;
- id linefeed_id = 13;
+ id linefeed_id = 13;
+ id special_prefix_id = 32007;
+ id special_middle_id = 32009;
+ id special_suffix_id = 32008;
+ id special_eot_id = 32010;
int find_bpe_rank(std::string token_left, std::string token_right) const {
- replace_all(token_left, " ", "\u0120");
- replace_all(token_left, "\n", "\u010A");
- replace_all(token_right, " ", "\u0120");
- replace_all(token_right, "\n", "\u010A");
+ GGML_ASSERT(token_left.find(" ") == std::string::npos);
+ GGML_ASSERT(token_left.find("\n") == std::string::npos);
+ GGML_ASSERT(token_right.find(" ") == std::string::npos);
+ GGML_ASSERT(token_right.find("\n") == std::string::npos);
auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
if (it == bpe_ranks.end()) {
std::string name = "n/a";
- llama_hparams hparams;
+ llama_hparams hparams = {};
llama_vocab vocab;
- struct ggml_tensor * tok_embeddings;
+ struct ggml_tensor * tok_embd;
+ struct ggml_tensor * pos_embd;
+ struct ggml_tensor * tok_norm;
+ struct ggml_tensor * tok_norm_b;
struct ggml_tensor * output_norm;
struct ggml_tensor * output_norm_b;
};
struct llama_context {
- llama_context(const llama_model & model) : model(model), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {}
+ llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {}
~llama_context() {
- if (model_owner) {
- delete &model;
- }
#ifdef GGML_USE_METAL
if (ctx_metal) {
ggml_metal_free(ctx_metal);
}
}
+ llama_cparams cparams;
+
+ const llama_model & model;
+
+ // key + value cache for the self attention
+ struct llama_kv_cache kv_self;
+
std::mt19937 rng;
bool has_evaluated_once = false;
+ int64_t t_start_us;
+ int64_t t_load_us;
int64_t t_sample_us = 0;
- int64_t t_eval_us = 0;
int64_t t_p_eval_us = 0;
+ int64_t t_eval_us = 0;
int32_t n_sample = 0; // number of tokens sampled
- int32_t n_eval = 0; // number of eval calls
int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
-
- const llama_model & model;
-
- bool model_owner = false;
-
- int64_t t_load_us;
- int64_t t_start_us;
-
- // key + value cache for the self attention
- struct llama_kv_cache kv_self;
+ int32_t n_eval = 0; // number of eval calls
// decode output (2-dimensional array: [n_tokens][n_vocab])
std::vector<float> logits;
const struct llama_hparams & hparams,
struct llama_kv_cache & cache,
ggml_type wtype,
- int n_ctx,
+ uint32_t n_ctx,
int n_gpu_layers) {
- const int n_embd = hparams.n_embd_gqa();
- const int n_layer = hparams.n_layer;
+ const uint32_t n_embd = hparams.n_embd_gqa();
+ const uint32_t n_layer = hparams.n_layer;
const int64_t n_mem = n_layer*n_ctx;
const int64_t n_elements = n_embd*n_mem;
- cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
- cache.n = 0;
+ cache.has_shift = false;
+
+ cache.head = 0;
+ cache.size = n_ctx;
+
+ cache.cells.clear();
+ cache.cells.resize(n_ctx);
+
+ cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*ggml_tensor_overhead());
+ memset(cache.buf.data, 0, cache.buf.size);
struct ggml_init_params params;
params.mem_size = cache.buf.size;
(void) n_gpu_layers;
#ifdef GGML_USE_CUBLAS
- if (n_gpu_layers > n_layer + 1) {
+ size_t vram_kv_cache = 0;
+
+ if (n_gpu_layers > (int)n_layer + 1) {
ggml_cuda_assign_buffers_no_scratch(cache.v);
+ LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__);
+ vram_kv_cache += ggml_nbytes(cache.v);
}
- if (n_gpu_layers > n_layer + 2) {
+ if (n_gpu_layers > (int)n_layer + 2) {
ggml_cuda_assign_buffers_no_scratch(cache.k);
+ LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__);
+ vram_kv_cache += ggml_nbytes(cache.k);
+ }
+ if (vram_kv_cache > 0) {
+ LLAMA_LOG_INFO("%s: VRAM kv self = %.2f MB\n", __func__, vram_kv_cache / 1024.0 / 1024.0);
}
#endif // GGML_USE_CUBLAS
return true;
}
+// find an empty slot of size "n_tokens" in the cache
+// updates the cache head
+// Note: On success, it's important that cache.head points
+// to the first cell of the slot.
+static bool llama_kv_cache_find_slot(
+ struct llama_kv_cache & cache,
+ const struct llama_batch & batch) {
+ const uint32_t n_ctx = cache.size;
+ const uint32_t n_tokens = batch.n_tokens;
+
+ if (n_tokens > n_ctx) {
+ LLAMA_LOG_ERROR("%s: n_tokens=%d > n_ctx=%d\n", __func__, n_tokens, n_ctx);
+ return false;
+ }
+
+ uint32_t n_tested = 0;
+
+ while (true) {
+ if (cache.head + n_tokens > n_ctx) {
+ n_tested += n_ctx - cache.head;
+ cache.head = 0;
+ continue;
+ }
+
+ bool found = true;
+ for (uint32_t i = 0; i < n_tokens; i++) {
+ if (cache.cells[cache.head + i].pos >= 0) {
+ found = false;
+ cache.head += i + 1;
+ n_tested += i + 1;
+ break;
+ }
+ }
+
+ if (found) {
+ break;
+ }
+
+ if (n_tested >= n_ctx) {
+ //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
+ return false;
+ }
+ }
+
+ for (uint32_t i = 0; i < n_tokens; i++) {
+ cache.cells[cache.head + i].pos = batch.pos[i];
+
+ for (int32_t j = 0; j < batch.n_seq_id[i]; j++) {
+ cache.cells[cache.head + i].seq_id.insert(batch.seq_id[i][j]);
+ }
+ }
+
+ return true;
+}
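+
+// Example of the wrap-around search above (hypothetical numbers): with size = 8,
+// head = 6 and n_tokens = 4, the slot cannot fit before the end of the ring, so
+// n_tested grows by 2, head resets to 0 and cells 0..3 are probed next; once
+// n_tested reaches n_ctx every cell has been considered and the search gives up.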
+
+// find how many cells are currently in use
+static int32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) {
+ for (uint32_t i = cache.size - 1; i > 0; --i) {
+ if (cache.cells[i].pos >= 0 && !cache.cells[i].seq_id.empty()) {
+ return i + 1;
+ }
+ }
+
+ return 0;
+}
+
+static void llama_kv_cache_clear(struct llama_kv_cache & cache) {
+ for (int32_t i = 0; i < (int32_t) cache.size; ++i) {
+ cache.cells[i].pos = -1;
+ cache.cells[i].seq_id.clear();
+ }
+ cache.head = 0;
+}
+
+static void llama_kv_cache_seq_rm(
+ struct llama_kv_cache & cache,
+ llama_seq_id seq_id,
+ llama_pos p0,
+ llama_pos p1) {
+ uint32_t new_head = cache.size;
+
+ if (p0 < 0) p0 = 0;
+ if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
+
+ for (uint32_t i = 0; i < cache.size; ++i) {
+ if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
+ if (seq_id < 0) {
+ cache.cells[i].seq_id.clear();
+ } else if (cache.cells[i].has_seq_id(seq_id)) {
+ cache.cells[i].seq_id.erase(seq_id);
+ } else {
+ continue;
+ }
+ if (cache.cells[i].seq_id.empty()) {
+ cache.cells[i].pos = -1;
+ if (new_head == cache.size) new_head = i;
+ }
+ }
+ }
+
+ // If we freed up a slot, set head to it so searching can start there.
+ if (new_head != cache.size) cache.head = new_head;
+}
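+
+// Illustrative note: p0/p1 select the half-open position range [p0, p1), and a
+// negative value means "unbounded". E.g. (hypothetical values):
+//
+//     llama_kv_cache_seq_rm(kv_self,  0, 100, -1); // drop seq 0 at pos >= 100
+//     llama_kv_cache_seq_rm(kv_self, -1,   0, -1); // seq_id < 0: drop the range
+//                                                  // from every sequence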
+
+static void llama_kv_cache_seq_cp(
+ struct llama_kv_cache & cache,
+ llama_seq_id seq_id_src,
+ llama_seq_id seq_id_dst,
+ llama_pos p0,
+ llama_pos p1) {
+ if (p0 < 0) p0 = 0;
+ if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
+
+ cache.head = 0;
+
+ for (uint32_t i = 0; i < cache.size; ++i) {
+ if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
+ cache.cells[i].seq_id.insert(seq_id_dst);
+ }
+ }
+}
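+
+// Illustrative note: copying only inserts seq_id_dst into the matching cells,
+// so both sequences share the same K/V data until they diverge. E.g. forking a
+// 32-token prompt (hypothetical length) from sequence 0 into sequence 1:
+//
+//     llama_kv_cache_seq_cp(kv_self, 0, 1, 0, 32);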
+
+static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
+ uint32_t new_head = cache.size;
+
+ for (uint32_t i = 0; i < cache.size; ++i) {
+ if (!cache.cells[i].has_seq_id(seq_id)) {
+ cache.cells[i].pos = -1;
+ cache.cells[i].seq_id.clear();
+ if (new_head == cache.size) new_head = i;
+ } else {
+ cache.cells[i].seq_id.clear();
+ cache.cells[i].seq_id.insert(seq_id);
+ }
+ }
+
+ // If we freed up a slot, set head to it so searching can start there.
+ if (new_head != cache.size) cache.head = new_head;
+}
+
+static void llama_kv_cache_seq_shift(
+ struct llama_kv_cache & cache,
+ llama_seq_id seq_id,
+ llama_pos p0,
+ llama_pos p1,
+ llama_pos delta) {
+ uint32_t new_head = cache.size;
+
+ if (p0 < 0) p0 = 0;
+ if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
+
+ for (uint32_t i = 0; i < cache.size; ++i) {
+ if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
+ cache.has_shift = true;
+ cache.cells[i].pos += delta;
+ cache.cells[i].delta += delta;
+
+ if (cache.cells[i].pos < 0) {
+ cache.cells[i].pos = -1;
+ cache.cells[i].seq_id.clear();
+ if (new_head == cache.size) new_head = i;
+ }
+ }
+ }
+
+ // If we freed up a slot, set head to it so searching can start there.
+ // Otherwise we just start the next search from the beginning.
+ cache.head = new_head != cache.size ? new_head : 0;
+}
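+
+// Illustrative sketch of context "scrolling" built from the calls above
+// (n_discard is hypothetical): drop the oldest tokens of sequence 0, then
+// slide the rest down:
+//
+//     llama_kv_cache_seq_rm   (kv_self, 0, 0,         n_discard);
+//     llama_kv_cache_seq_shift(kv_self, 0, n_discard, -1, -n_discard);
+//
+// The shifted cells set cache.has_shift; the matching RoPE correction is
+// applied later by the K-shift graph (see llm_build_k_shift below).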
+
//
// model loading and saving
//
enum llama_fver {
GGUF_FILE_VERSION_V1 = 1,
GGUF_FILE_VERSION_V2 = 2,
+ GGUF_FILE_VERSION_V3 = 3,
};
static const char * llama_file_version_name(llama_fver version) {
switch (version) {
case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
- case GGUF_FILE_VERSION_V2: return "GGUF V2 (latest)";
+ case GGUF_FILE_VERSION_V2: return "GGUF V2";
+ case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
}
return "unknown";
int n_created = 0;
int64_t n_elements = 0;
+ size_t n_bytes = 0;
bool use_mmap = false;
const char * name = gguf_get_tensor_name(ctx_gguf, i);
struct ggml_tensor * t = ggml_get_tensor(ctx_meta, name);
n_elements += ggml_nelements(t);
+ n_bytes += ggml_nbytes(t);
}
LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
}
}
- struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend backend) {
+ struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend_type backend) {
if (backend != GGML_BACKEND_CPU) {
ggml_set_no_alloc(ctx, true);
}
return tensor;
}
- struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend backend) {
+ struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend) {
struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
if (cur == NULL) {
throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
}
+ if (backend == GGML_BACKEND_GPU_SPLIT) {
+ if (ne.size() == 1) {
+ throw std::runtime_error(format("%s: 1-dimensional tensor '%s' cannot be split on the GPU", __func__, name.c_str()));
+ }
+ }
+
{
bool is_ok = true;
for (size_t i = 0; i < ne.size(); ++i) {
lmlock->grow_to(size_lock);
}
break;
-#if defined(GGML_USE_CUBLAS)
+#ifdef GGML_USE_CUBLAS
case GGML_BACKEND_GPU:
case GGML_BACKEND_GPU_SPLIT:
// old code:
// load LLaMA models
//
-std::string llama_model_ftype_name(enum llama_ftype ftype) {
+static std::string llama_model_arch_name(llm_arch arch) {
+ auto it = LLM_ARCH_NAMES.find(arch);
+ if (it == LLM_ARCH_NAMES.end()) {
+ return "unknown";
+ }
+ return it->second;
+}
+
+static std::string llama_model_ftype_name(llama_ftype ftype) {
if (ftype & LLAMA_FTYPE_GUESSED) {
return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
}
static const char * llama_model_type_name(e_model type) {
switch (type) {
+ case MODEL_1B: return "1B";
case MODEL_3B: return "3B";
case MODEL_7B: return "7B";
+ case MODEL_8B: return "8B";
case MODEL_13B: return "13B";
+ case MODEL_15B: return "15B";
case MODEL_30B: return "30B";
case MODEL_34B: return "34B";
case MODEL_40B: return "40B";
static void llm_load_hparams(
llama_model_loader & ml,
- llama_model & model,
- int n_ctx,
- float rope_freq_base,
- float rope_freq_scale) {
+ llama_model & model) {
struct gguf_context * ctx = ml.ctx_gguf;
const auto kv = LLM_KV(model.arch);
GGUF_GET_KEY(ctx, model.name, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_GENERAL_NAME));
// get hparams kv
- GGUF_GET_KEY(ctx, hparams.n_vocab, gguf_get_arr_n, GGUF_TYPE_ARRAY, true, kv(LLM_KV_TOKENIZER_LIST));
- GGUF_GET_KEY(ctx, hparams.n_ctx_train, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_CONTEXT_LENGTH));
- GGUF_GET_KEY(ctx, hparams.n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_EMBEDDING_LENGTH));
- GGUF_GET_KEY(ctx, hparams.n_ff, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_FEED_FORWARD_LENGTH));
- GGUF_GET_KEY(ctx, hparams.n_head, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_ATTENTION_HEAD_COUNT));
- GGUF_GET_KEY(ctx, hparams.n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_BLOCK_COUNT));
+ GGUF_GET_KEY(ctx, hparams.n_vocab, gguf_get_arr_n, GGUF_TYPE_ARRAY, true, kv(LLM_KV_TOKENIZER_LIST));
+ GGUF_GET_KEY(ctx, hparams.n_ctx_train, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_CONTEXT_LENGTH));
+ GGUF_GET_KEY(ctx, hparams.n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_EMBEDDING_LENGTH));
+ GGUF_GET_KEY(ctx, hparams.n_ff, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_FEED_FORWARD_LENGTH));
+ GGUF_GET_KEY(ctx, hparams.n_head, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_ATTENTION_HEAD_COUNT));
+ GGUF_GET_KEY(ctx, hparams.n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_BLOCK_COUNT));
// n_head_kv is optional, default to n_head
hparams.n_head_kv = hparams.n_head;
GGUF_GET_KEY(ctx, hparams.n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ATTENTION_HEAD_COUNT_KV));
- // TODO: manually setting rope freq base and scale should override this
- // FIXME: partial fix when the param specified is not the default value, but
- // will not work for overriding the model value to the params default
+ hparams.rope_finetuned = false;
+ GGUF_GET_KEY(ctx, hparams.rope_finetuned, gguf_get_val_bool, GGUF_TYPE_BOOL, false,
+ kv(LLM_KV_ROPE_SCALING_FINETUNED));
- llama_context_params defaults = llama_context_default_params();
+ hparams.n_yarn_orig_ctx = hparams.n_ctx_train;
+ GGUF_GET_KEY(ctx, hparams.n_yarn_orig_ctx, gguf_get_val_u32, GGUF_TYPE_UINT32, false,
+ kv(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN));
- // rope_freq_base
- {
- float ropebase = 10000.0f;
- GGUF_GET_KEY(ctx, ropebase, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_FREQ_BASE));
- if (ropebase != 10000.0f && rope_freq_base == defaults.rope_freq_base) {
- rope_freq_base = ropebase;
- }
- }
+ // rope_freq_base (optional)
+ hparams.rope_freq_base_train = 10000.0f;
+ GGUF_GET_KEY(ctx, hparams.rope_freq_base_train, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_FREQ_BASE));
+
+ std::string rope_scaling("linear");
+ GGUF_GET_KEY(ctx, rope_scaling, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_ROPE_SCALING_TYPE));
+ hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
+ GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_UNSPECIFIED);
// rope_freq_scale (inverse of the kv) is optional
- {
- float ropescale = 1.0f;
+ float ropescale = 0.0f;
+ GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALING_FACTOR));
+ if (ropescale == 0.0f) { // try the old key name
GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR));
- if (ropescale != 1.0f && rope_freq_scale == defaults.rope_freq_scale) {
- rope_freq_scale = 1.0f/ropescale;
- }
}
+ hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
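+    // e.g. a model fine-tuned with 4x linear RoPE scaling stores a factor of
+    // 4.0 in the GGUF kv, which becomes rope_freq_scale_train = 0.25 here
+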
// sanity check for n_rot (optional)
{
default: model.type = e_model::MODEL_UNKNOWN;
}
} break;
+ case LLM_ARCH_STARCODER:
+ {
+ GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
+ switch (hparams.n_layer) {
+ case 24: model.type = e_model::MODEL_1B; break;
+ case 36: model.type = e_model::MODEL_3B; break;
+ case 42: model.type = e_model::MODEL_7B; break;
+ case 40: model.type = e_model::MODEL_15B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
+ case LLM_ARCH_PERSIMMON:
+ {
+ GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
+ switch (hparams.n_layer) {
+ case 36: model.type = e_model::MODEL_8B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
+ case LLM_ARCH_REFACT:
+ {
+ GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
+ switch (hparams.n_layer) {
+ case 32: model.type = e_model::MODEL_1B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
+ case LLM_ARCH_BLOOM:
+ {
+ GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
+
+ switch (hparams.n_layer) {
+ case 24: model.type = e_model::MODEL_1B; break;
+ case 30:
+ switch (hparams.n_embd) {
+ case 2560: model.type = e_model::MODEL_3B; break;
+ case 4096: model.type = e_model::MODEL_7B; break;
+ } break;
+ }
+ } break;
+ case LLM_ARCH_MPT:
+ {
+ hparams.f_clamp_kqv = 0.0f;
+
+ GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
+ GGUF_GET_KEY(ctx, hparams.f_clamp_kqv, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ATTENTION_CLAMP_KQV));
+ GGUF_GET_KEY(ctx, hparams.f_max_alibi_bias, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_MAX_ALIBI_BIAS));
+
+ switch (hparams.n_layer) {
+ case 32: model.type = e_model::MODEL_7B; break;
+ case 48: model.type = e_model::MODEL_30B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
default: (void)0;
- };
+ }
model.ftype = ml.ftype;
-
- hparams.n_ctx = n_ctx;
- hparams.rope_freq_base = rope_freq_base;
- hparams.rope_freq_scale = rope_freq_scale;
}
// TODO: This should probably be in llama.h
-static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos);
+static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special = false);
static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch);
static void llm_load_vocab(
throw std::runtime_error("cannot find tokenizer vocab in model file\n");
}
+ const float * scores = nullptr;
const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
- if (score_idx == -1) {
- throw std::runtime_error("cannot find tokenizer scores in model file\n");
+ if (score_idx != -1) {
+ scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
}
- const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
-
+ const int * toktypes = nullptr;
const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
- if (toktype_idx == -1) {
- throw std::runtime_error("cannot find token type list in GGUF file\n");
+ if (toktype_idx != -1) {
+ toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
}
- const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
-
// determine vocab type
{
std::string tokenizer_name;
for (int i = 0; i < n_merges; i++) {
const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
+ GGML_ASSERT(codepoints_from_utf8(word).size() > 0);
std::string first;
std::string second;
for (uint32_t i = 0; i < n_vocab; i++) {
std::string word = gguf_get_arr_str(ctx, token_idx, i);
+ GGML_ASSERT(codepoints_from_utf8(word).size() > 0);
vocab.token_to_id[word] = i;
auto & token_data = vocab.id_to_token[i];
token_data.text = std::move(word);
- token_data.score = scores[i];
- token_data.type = (llama_token_type) toktypes[i];
+ token_data.score = scores ? scores[i] : 0.0f;
+ token_data.type = toktypes ? (llama_token_type) toktypes[i] : LLAMA_TOKEN_TYPE_NORMAL;
}
+ GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
// determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
} else {
- vocab.linefeed_id = llama_tokenize_internal(vocab, "\n", false)[0];
+ const std::vector<int> ids = llama_tokenize_internal(vocab, "\u010A", false);
+ GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
+ vocab.linefeed_id = ids[0];
}
// special tokens
- GGUF_GET_KEY(ctx, vocab.special_bos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_BOS_ID));
- GGUF_GET_KEY(ctx, vocab.special_eos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_EOS_ID));
- GGUF_GET_KEY(ctx, vocab.special_unk_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_UNK_ID));
- GGUF_GET_KEY(ctx, vocab.special_sep_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_SEP_ID));
- GGUF_GET_KEY(ctx, vocab.special_pad_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_PAD_ID));
-}
+ {
+ const std::vector<std::pair<enum llm_kv, int32_t &>> special_token_types = {
+ { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id },
+ { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id },
+ { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id },
+ { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id },
+ { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id },
+ };
+ for (const auto & it : special_token_types) {
+ const std::string & key = kv(std::get<0>(it));
+            int32_t & id = std::get<1>(it);
+            const int32_t old_id = id;
+
+ GGUF_GET_KEY(ctx, id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, key);
+ // Must be >= -1 and < vocab size. Since the key is unsigned, -1
+ // can only come from the default value, so there's no point in
+ // validating that.
+ if (size_t(id + 1) > vocab.id_to_token.size()) {
+ LLAMA_LOG_WARN("%s: bad special token: '%s' = %d, using default id %d\n",
+ __func__, key.c_str(), id, old_id);
+ id = old_id;
+ }
+ }
+ }
-static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
- const auto & hparams = model.hparams;
- const auto & vocab = model.vocab;
+ // build special tokens cache
+ {
+        // TODO: It is unclear (to me) at this point whether special tokens are guaranteed to be of a deterministic type,
+        //  and will always be correctly labeled in 'added_tokens.json', etc.
+        // The assumption is that since special tokens aren't meant to be exposed to the end user, they are designed
+        //  to be unmatchable by the tokenizer; therefore, tokens from the vocab which are unmatchable by the tokenizer
+        //  are special tokens.
+        // From testing, this appears to correlate 1:1 with special tokens.
+        //
+
+        // Counting special tokens and verifying in only one direction
+        //  is sufficient to detect a difference between those two sets.
+        //
+ uint32_t special_tokens_count_by_type = 0;
+ uint32_t special_tokens_count_from_verification = 0;
+
+ bool special_tokens_definition_mismatch = false;
+
+ for (const auto & t : vocab.token_to_id) {
+ const auto & token = t.first;
+ const auto & id = t.second;
+
+ // Count all non-normal tokens in the vocab while iterating
+ if (vocab.id_to_token[id].type != LLAMA_TOKEN_TYPE_NORMAL) {
+ special_tokens_count_by_type++;
+ }
- // hparams
- LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
- LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch).c_str());
- LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, vocab.type == LLAMA_VOCAB_TYPE_SPM ? "SPM" : "BPE"); // TODO: fix
- LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
- LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (int) vocab.bpe_ranks.size());
- LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
- LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx);
- LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
- LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
- LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
- LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
- LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
- LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
- LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
- LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
- LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
- LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
- LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
- LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
- LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
- LLAMA_LOG_INFO("%s: model size = %.2f B\n", __func__, ml.n_elements*1e-9);
+ // Skip single character tokens
+ if (token.length() > 1) {
+ bool is_tokenizable = false;
- // general kv
+                // Split the token's string representation in two, in all possible ways,
+                // and check whether both halves can be matched to a valid token
+ for (unsigned i = 1; i < token.length();) {
+ const auto left = token.substr(0, i);
+ const auto right = token.substr(i);
+
+                    // check that we didn't partition in the middle of a UTF-8 sequence
+ auto utf = utf8_len(left.at(left.length() - 1));
+
+ if (utf == 1) {
+ if (vocab.token_to_id.find(left) != vocab.token_to_id.end() &&
+ vocab.token_to_id.find(right) != vocab.token_to_id.end() ) {
+ is_tokenizable = true;
+ break;
+ }
+ i++;
+ } else {
+                        // skip over the rest of the multibyte UTF-8 sequence
+ i += utf - 1;
+ }
+ }
+
+ if (!is_tokenizable) {
+                    // Some tokens are multibyte, but they are UTF-8 sequences with an effective text length of 1;
+                    //  it's faster to re-filter them here, since there are far fewer candidates now
+
+                    // Calculate the total UTF-8 length of the token's string representation
+ size_t utf8_str_len = 0;
+ for (unsigned i = 0; i < token.length();) {
+ utf8_str_len++;
+ i += utf8_len(token.at(i));
+ }
+
+ // And skip the ones which are one character
+ if (utf8_str_len > 1) {
+ // At this point what we have left are special tokens only
+ vocab.special_tokens_cache[token] = id;
+
+ // Count manually found special tokens
+ special_tokens_count_from_verification++;
+
+ // If this manually found special token is not marked as such, flag a mismatch
+ if (vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL) {
+ special_tokens_definition_mismatch = true;
+ }
+ }
+ }
+ }
+ }
+
+ if (special_tokens_definition_mismatch || special_tokens_count_from_verification != special_tokens_count_by_type) {
+ LLAMA_LOG_WARN("%s: mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
+ __func__,
+ special_tokens_count_from_verification, vocab.id_to_token.size(),
+ special_tokens_count_by_type, vocab.id_to_token.size()
+ );
+ } else {
+ LLAMA_LOG_INFO("%s: special tokens definition check successful ( %u/%zu ).\n",
+ __func__,
+ special_tokens_count_from_verification, vocab.id_to_token.size()
+ );
+ }
+ }
+}
+
+static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
+ const auto & hparams = model.hparams;
+ const auto & vocab = model.vocab;
+
+ const auto rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
+
+ // hparams
+ LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
+ LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch).c_str());
+ LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, vocab.type == LLAMA_VOCAB_TYPE_SPM ? "SPM" : "BPE"); // TODO: fix
+ LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
+ LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (int) vocab.bpe_ranks.size());
+ LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
+ LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
+ LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
+ LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
+ LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
+ LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
+ LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
+ LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
+ LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
+ LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
+ LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
+ LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
+ LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type.c_str());
+ LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
+ LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
+ LLAMA_LOG_INFO("%s: n_yarn_orig_ctx = %u\n", __func__, hparams.n_yarn_orig_ctx);
+ LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
+ LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
+ LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
+ LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
+ if (ml.n_bytes < GB) {
+ LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
+ } else {
+ LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
+ }
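+    // for example (illustrative numbers), a 6.74e9-parameter model quantized
+    // to 3.61 GiB logs "model size = 3.61 GiB (4.60 BPW)", since
+    // n_bytes*8.0/n_elements is the average number of bits per weight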
+
+ // general kv
LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
// special tokens
static void llm_load_tensors(
llama_model_loader & ml,
llama_model & model,
- int n_batch,
int n_gpu_layers,
int main_gpu,
const float * tensor_split,
- const bool mul_mat_q,
- bool low_vram,
- ggml_type memory_type,
bool use_mlock,
llama_progress_callback progress_callback,
void * progress_callback_user_data) {
}
(void) main_gpu;
- (void) mul_mat_q;
-#if defined(GGML_USE_CUBLAS)
+#ifdef GGML_USE_CUBLAS
LLAMA_LOG_INFO("%s: using " GGML_CUDA_NAME " for GPU acceleration\n", __func__);
ggml_cuda_set_main_device(main_gpu);
- ggml_cuda_set_mul_mat_q(mul_mat_q);
#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
#elif defined(GGML_USE_CLBLAST)
const auto tn = LLM_TN(model.arch);
switch (model.arch) {
case LLM_ARCH_LLAMA:
+ case LLM_ARCH_REFACT:
{
- model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+ model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
// output
{
- ggml_backend backend_norm;
- ggml_backend backend_output;
+ ggml_backend_type backend_norm;
+ ggml_backend_type backend_output;
if (n_gpu_layers > int(n_layer)) {
// norm is not performance relevant on its own but keeping it in VRAM reduces data copying
// on Windows however this is detrimental unless everything is on the GPU
#ifndef _WIN32
- backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+ backend_norm = LLAMA_BACKEND_OFFLOAD;
#else
- backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+ backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
#endif // _WIN32
backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
model.layers.resize(n_layer);
for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
- const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
auto & layer = model.layers[i];
layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
- layer.w1 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
- layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
- layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
if (backend == GGML_BACKEND_GPU) {
vram_weights +=
- ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
- ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
- ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
+ ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
+ ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
+ ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
}
}
} break;
case LLM_ARCH_BAICHUAN:
{
- model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+ model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
{
- ggml_backend backend_norm;
- ggml_backend backend_output;
+ ggml_backend_type backend_norm;
+ ggml_backend_type backend_output;
if (n_gpu_layers > int(n_layer)) {
// norm is not performance relevant on its own but keeping it in VRAM reduces data copying
// on Windows however this is detrimental unless everything is on the GPU
#ifndef _WIN32
- backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+ backend_norm = LLAMA_BACKEND_OFFLOAD;
#else
- backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+ backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
#endif // _WIN32
backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
model.layers.resize(n_layer);
for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
- const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
auto & layer = model.layers[i];
layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
- layer.w1 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
- layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
- layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
if (backend == GGML_BACKEND_GPU) {
vram_weights +=
- ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
- ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
- ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
+ ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
+ ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
+ ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
}
}
} break;
{
// TODO: CPU-only for now
- model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+ model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
// output
{
- ggml_backend backend_norm;
- ggml_backend backend_output;
+ ggml_backend_type backend_norm;
+ ggml_backend_type backend_output;
if (n_gpu_layers > int(n_layer)) {
// norm is not performance relevant on its own but keeping it in VRAM reduces data copying
// on Windows however this is detrimental unless everything is on the GPU
#ifndef _WIN32
- backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+ backend_norm = LLAMA_BACKEND_OFFLOAD;
#else
- backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+ backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
#endif // _WIN32
backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
model.layers.resize(n_layer);
for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
- const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
auto & layer = model.layers[i];
layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
- layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
- layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
if (backend == GGML_BACKEND_GPU) {
vram_weights +=
ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) +
ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.wo) +
- ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
+ ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
+ }
+ }
+ } break;
+ case LLM_ARCH_STARCODER:
+ {
+ model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+ model.pos_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, hparams.n_ctx_train}, GGML_BACKEND_CPU);
+
+ // output
+ {
+ ggml_backend_type backend_norm;
+ ggml_backend_type backend_output;
+
+ if (n_gpu_layers > int(n_layer)) {
+ // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
+ // on Windows however this is detrimental unless everything is on the GPU
+#ifndef _WIN32
+ backend_norm = LLAMA_BACKEND_OFFLOAD;
+#else
+ backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#endif // _WIN32
+
+ backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
+
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+
+ if (backend_norm == GGML_BACKEND_GPU) {
+ vram_weights += ggml_nbytes(model.output_norm);
+ vram_weights += ggml_nbytes(model.output_norm_b);
+ }
+ if (backend_output == GGML_BACKEND_GPU_SPLIT) {
+ vram_weights += ggml_nbytes(model.output);
+ }
+ }
+
+ const uint32_t n_ff = hparams.n_ff;
+
+ const int i_gpu_start = n_layer - n_gpu_layers;
+
+ model.layers.resize(n_layer);
+
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+
+ auto & layer = model.layers[i];
+
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+ layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
+
+ layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
+ layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
+
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+ layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
+
+ layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
+ layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
+
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
+ layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
+
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
+
+ if (backend == GGML_BACKEND_GPU) {
+ vram_weights +=
+ ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) +
+ ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) +
+ ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) +
+ ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_norm_b) +
+ ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b) +
+ ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b);
+ }
+ }
+ } break;
+ case LLM_ARCH_PERSIMMON:
+ {
+ model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+
+ {
+ ggml_backend_type backend_norm;
+ ggml_backend_type backend_output;
+
+ if (n_gpu_layers > int(n_layer)) {
+ // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
+ // on Windows however this is detrimental unless everything is on the GPU
+#ifndef _WIN32
+ backend_norm = LLAMA_BACKEND_OFFLOAD;
+#else
+ backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#endif // _WIN32
+
+ backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
+
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+
+ if (backend_norm == GGML_BACKEND_GPU) {
+ vram_weights += ggml_nbytes(model.output_norm);
+ vram_weights += ggml_nbytes(model.output_norm_b);
+ }
+ if (backend_output == GGML_BACKEND_GPU_SPLIT) {
+ vram_weights += ggml_nbytes(model.output);
+ }
+ }
+
+ const uint32_t n_ff = hparams.n_ff;
+ const int i_gpu_start = n_layer - n_gpu_layers;
+ model.layers.resize(n_layer);
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT;
+ auto & layer = model.layers[i];
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+ layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
+ layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
+ layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+ layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
+ layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
+ layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
+ layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
+ layer.attn_q_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {64}, backend);
+ layer.attn_q_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {64}, backend);
+ layer.attn_k_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {64}, backend);
+ layer.attn_k_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {64}, backend);
+ }
+ } break;
+ case LLM_ARCH_BLOOM:
+ {
+ // TODO: CPU-only for now
+
+ model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+ model.tok_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, GGML_BACKEND_CPU);
+ model.tok_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, GGML_BACKEND_CPU);
+
+ // output
+ {
+ ggml_backend_type backend_norm;
+ ggml_backend_type backend_output;
+
+ if (n_gpu_layers > int(n_layer)) {
+ // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
+ // on Windows however this is detrimental unless everything is on the GPU
+#ifndef _WIN32
+ backend_norm = LLAMA_BACKEND_OFFLOAD;
+#else
+ backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#endif // _WIN32
+
+ backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
+
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+
+ if (backend_norm == GGML_BACKEND_GPU) {
+ vram_weights += ggml_nbytes(model.output_norm);
+ vram_weights += ggml_nbytes(model.output_norm_b);
+ }
+ if (backend_output == GGML_BACKEND_GPU_SPLIT) {
+ vram_weights += ggml_nbytes(model.output);
+ }
+ }
+
+ const uint32_t n_ff = hparams.n_ff;
+
+ const int i_gpu_start = n_layer - n_gpu_layers;
+
+ model.layers.resize(n_layer);
+
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+
+ auto & layer = model.layers[i];
+
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+ layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
+
+ layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
+ layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
+
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+ layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
+
+ layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
+ layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
+
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
+ layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
+
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
+
+ if (backend == GGML_BACKEND_GPU) {
+ vram_weights +=
+ ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) +
+ ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) +
+ ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) +
+ ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_norm_b) +
+ ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b) +
+ ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b);
+ }
+ }
+ } break;
+ case LLM_ARCH_MPT:
+ {
+ model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+
+ // output
+ {
+ ggml_backend_type backend_norm;
+ ggml_backend_type backend_output;
+
+ if (n_gpu_layers > int(n_layer)) {
+ // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
+ // on Windows however this is detrimental unless everything is on the GPU
+#ifndef _WIN32
+ backend_norm = LLAMA_BACKEND_OFFLOAD;
+#else
+ backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#endif // _WIN32
+
+ backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
+
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+
+ if (backend_norm == GGML_BACKEND_GPU) {
+ vram_weights += ggml_nbytes(model.output_norm);
+ }
+ if (backend_output == GGML_BACKEND_GPU_SPLIT) {
+ vram_weights += ggml_nbytes(model.output);
+ }
+ }
+
+ const uint32_t n_ff = hparams.n_ff;
+
+ const int i_gpu_start = n_layer - n_gpu_layers;
+
+ model.layers.resize(n_layer);
+
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+
+ auto & layer = model.layers[i];
+
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+ layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+
+ layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
+
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+
+ if (backend == GGML_BACKEND_GPU) {
+ vram_weights +=
+ ggml_nbytes(layer.attn_norm) +
+ ggml_nbytes(layer.wqkv) +
+ ggml_nbytes(layer.wo) +
+ ggml_nbytes(layer.ffn_norm) +
+ ggml_nbytes(layer.ffn_down) +
+ ggml_nbytes(layer.ffn_up);
}
}
} break;
default:
throw std::runtime_error("unknown architecture");
- };
+ }
}
ml.done_getting_tensors();
// print memory requirements
{
- const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1;
-
// this is the total memory required to run the inference
size_t mem_required =
ctx_size +
mmapped_size - vram_weights; // weights in VRAM not in memory
- // this is the memory required by one llama_state
- const size_t mem_required_state = scale*hparams.kv_size();
-
- LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
- mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
-
- (void) n_batch;
+ LLAMA_LOG_INFO("%s: mem required = %7.2f MB\n", __func__, mem_required / 1024.0 / 1024.0);
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
if (n_gpu_layers > (int) hparams.n_layer) {
LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
}
- size_t vram_kv_cache = 0;
#ifdef GGML_USE_CUBLAS
const int max_backend_supported_layers = hparams.n_layer + 3;
- const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
- if (n_gpu_layers > (int) hparams.n_layer + 1) {
- if (low_vram) {
- LLAMA_LOG_INFO("%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
- } else {
- LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__);
- vram_kv_cache += hparams.kv_size() / 2;
- }
- }
- if (n_gpu_layers > (int) hparams.n_layer + 2) {
- if (low_vram) {
- LLAMA_LOG_WARN("%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
- } else {
- LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__);
- vram_kv_cache += hparams.kv_size() / 2;
- }
- }
-#elif defined(GGML_USE_CLBLAST)
+ const int max_offloadable_layers = hparams.n_layer + 3;
+#elif defined(GGML_USE_CLBLAST)
const int max_backend_supported_layers = hparams.n_layer + 1;
- const int max_offloadable_layers = hparams.n_layer + 1;
+ const int max_offloadable_layers = hparams.n_layer + 1;
#endif // GGML_USE_CUBLAS
- LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n",
- __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
- LLAMA_LOG_INFO("%s: VRAM used: %zu MB\n",
- __func__, (vram_weights + vram_kv_cache + MB - 1) / MB); // round up
+ LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
+ LLAMA_LOG_INFO("%s: VRAM used: %.2f MB\n", __func__, vram_weights / 1024.0 / 1024.0);
#else
(void) n_gpu_layers;
#endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
}
(void) tensor_split;
-#if defined(GGML_USE_CUBLAS)
+#ifdef GGML_USE_CUBLAS
{
ggml_cuda_set_tensor_split(tensor_split);
}
model.t_load_us = ggml_time_us() - model.t_start_us;
}
-static bool llama_model_load(
- const std::string & fname,
- llama_model & model,
- int n_ctx,
- int n_batch,
- int n_gpu_layers,
- int main_gpu,
- const float * tensor_split,
- const bool mul_mat_q,
- float rope_freq_base,
- float rope_freq_scale,
- bool low_vram,
- ggml_type memory_type,
- bool use_mmap,
- bool use_mlock,
- bool vocab_only,
- llama_progress_callback progress_callback,
- void *progress_callback_user_data) {
+static bool llama_model_load(const std::string & fname, llama_model & model, const llama_model_params & params) {
try {
- std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap));
+ llama_model_loader ml(fname, params.use_mmap);
+
+ model.hparams.vocab_only = params.vocab_only;
- llm_load_arch (*ml, model);
- llm_load_hparams(*ml, model, n_ctx, rope_freq_base, rope_freq_scale);
- llm_load_vocab (*ml, model);
+ llm_load_arch (ml, model);
+ llm_load_hparams(ml, model);
+ llm_load_vocab (ml, model);
- llm_load_print_meta(*ml, model);
+ llm_load_print_meta(ml, model);
if (model.hparams.n_vocab != model.vocab.id_to_token.size()) {
throw std::runtime_error("vocab size mismatch");
}
- if (vocab_only) {
+ if (params.vocab_only) {
LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
return true;
}
llm_load_tensors(
- *ml, model, n_batch, n_gpu_layers,
- main_gpu, tensor_split, mul_mat_q, low_vram, memory_type,
- use_mlock, progress_callback, progress_callback_user_data);
+ ml, model, params.n_gpu_layers, params.main_gpu, params.tensor_split, params.use_mlock,
+ params.progress_callback, params.progress_callback_user_data
+ );
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
return false;
return true;
}
-static struct ggml_cgraph * llm_build_llama(
- llama_context & lctx,
- const llama_token * tokens,
- const float * embd,
- int n_tokens,
- int n_past) {
+//
+// llm_build
+//
- GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT
+using llm_build_cb = std::function<void(struct ggml_tensor * cur, const char * name, int nl)>;
- const int N = n_tokens;
+enum llm_rope_type {
+ LLM_ROPE,
+ LLM_ROPE_NEOX,
+ LLM_ROPE_GLM,
+};
- const auto & model = lctx.model;
- const auto & hparams = model.hparams;
+enum llm_ffn_op_type {
+ LLM_FFN_SILU,
+ LLM_FFN_GELU,
+ LLM_FFN_RELU,
+ LLM_FFN_RELU_SQR,
+};
- const auto & kv_self = lctx.kv_self;
+enum llm_ffn_gate_type {
+ LLM_FFN_SEQ,
+ LLM_FFN_PAR, // ffn_gate is parallel to ffn_up
+};
- GGML_ASSERT(!!kv_self.ctx);
+enum llm_norm_type {
+ LLM_NORM,
+ LLM_NORM_RMS,
+};
- const int64_t n_embd = hparams.n_embd;
+static struct ggml_tensor * llm_build_inp_embd(
+ struct ggml_context * ctx,
+ const llama_hparams & hparams,
+ const llama_batch & batch,
+ struct ggml_tensor * tok_embd,
+ const llm_build_cb & cb) {
+ const int64_t n_embd = hparams.n_embd;
+
+ struct ggml_tensor * inpL;
+
+ if (batch.token) {
+ struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens);
+ cb(inp_tokens, "inp_tokens", -1);
+
+ inpL = ggml_get_rows(ctx, tok_embd, inp_tokens);
+ } else {
+#ifdef GGML_USE_MPI
+ GGML_ASSERT(false && "not implemented");
+#endif
+
+ inpL = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
+ }
+
+ return inpL;
+}
+
+// Persimmon: n_rot = n_embd_head/2
+// Other: n_rot = n_embd_head
+static void llm_build_k_shift(
+ struct ggml_context * ctx,
+ const llama_hparams & hparams,
+ const llama_cparams & cparams,
+ const llama_kv_cache & kv,
+ struct ggml_cgraph * graph,
+ llm_rope_type type,
+ int64_t n_ctx,
+ int64_t n_rot,
+ float freq_base,
+ float freq_scale,
+ const llm_build_cb & cb) {
const int64_t n_layer = hparams.n_layer;
- const int64_t n_ctx = hparams.n_ctx;
- const int64_t n_head = hparams.n_head;
const int64_t n_head_kv = hparams.n_head_kv;
- const int64_t n_embd_head = hparams.n_embd_head();
const int64_t n_embd_gqa = hparams.n_embd_gqa();
+ const int64_t n_embd_head = hparams.n_embd_head();
+ const int32_t n_orig_ctx = cparams.n_yarn_orig_ctx;
+ const float ext_factor = cparams.yarn_ext_factor;
+ const float attn_factor = cparams.yarn_attn_factor;
+ const float beta_fast = cparams.yarn_beta_fast;
+ const float beta_slow = cparams.yarn_beta_slow;
- GGML_ASSERT(n_embd_head == hparams.n_rot);
+ GGML_ASSERT(n_embd_head % n_rot == 0);
- const float freq_base = hparams.rope_freq_base;
- const float freq_scale = hparams.rope_freq_scale;
- const float norm_rms_eps = hparams.f_norm_rms_eps;
+ struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, n_ctx);
+ cb(K_shift, "K_shift", -1);
- const int n_gpu_layers = model.n_gpu_layers;
+ int rope_type = 0;
- auto & buf_compute = lctx.buf_compute;
+ switch (type) {
+ case LLM_ROPE: rope_type = 0; break;
+ case LLM_ROPE_NEOX: rope_type = 2; break;
+ case LLM_ROPE_GLM: rope_type = 4; break;
+ }
- struct ggml_init_params params = {
- /*.mem_size =*/ buf_compute.size,
- /*.mem_buffer =*/ buf_compute.data,
- /*.no_alloc =*/ false,
- };
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * tmp =
+ // we rotate only the first n_rot dimensions
+ ggml_rope_custom_inplace(ctx,
+ ggml_view_3d(ctx, kv.k,
+ n_rot, n_head_kv, n_ctx,
+ ggml_element_size(kv.k)*n_embd_head,
+ ggml_element_size(kv.k)*n_embd_gqa,
+ ggml_element_size(kv.k)*n_embd_gqa*n_ctx*il),
+ K_shift, n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow);
+ cb(tmp, "K_shifted", il);
+ ggml_build_forward_expand(graph, tmp);
+ }
+}
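+
+// Illustrative note: at eval time K_shift is filled with the per-cell position
+// deltas accumulated by llama_kv_cache_seq_shift(); re-applying RoPE with those
+// deltas rotates the cached keys in place, so previously cached tokens behave
+// as if they had been computed at their new positions.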
+
+static void llm_build_kv_store(
+ struct ggml_context * ctx,
+ const llama_hparams & hparams,
+ const llama_kv_cache & kv,
+ struct ggml_cgraph * graph,
+ struct ggml_tensor * k_cur,
+ struct ggml_tensor * v_cur,
+ int64_t n_ctx,
+ int32_t n_tokens,
+ int32_t kv_head,
+ const llm_build_cb & cb,
+ int64_t il) {
+ const int64_t n_embd_gqa = hparams.n_embd_gqa();
+
+ // compute the transposed [n_tokens, n_embd] V matrix
+ struct ggml_tensor * v_cur_t = ggml_transpose(ctx, ggml_reshape_2d(ctx, v_cur, n_embd_gqa, n_tokens));
+ //struct ggml_tensor * v_cur_t = ggml_transpose(ctx, v_cur); // TODO: reshape above is likely not needed
+ cb(v_cur_t, "v_cur_t", il);
+
+ struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k, n_tokens*n_embd_gqa,
+ (ggml_element_size(kv.k)*n_embd_gqa)*(il*n_ctx + kv_head));
+ cb(k_cache_view, "k_cache_view", il);
+
+ struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv.v, n_tokens, n_embd_gqa,
+ ( n_ctx)*ggml_element_size(kv.v),
+ (il*n_ctx)*ggml_element_size(kv.v)*n_embd_gqa + kv_head*ggml_element_size(kv.v));
+ cb(v_cache_view, "v_cache_view", il);
+
+ // important: storing RoPE-ed version of K in the KV cache!
+ ggml_build_forward_expand(graph, ggml_cpy(ctx, k_cur, k_cache_view));
+ ggml_build_forward_expand(graph, ggml_cpy(ctx, v_cur_t, v_cache_view));
+}
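+
+// Illustrative layout note: per layer, K is stored as [n_embd_gqa, n_ctx], so a
+// token's keys land at byte offset ggml_element_size(kv.k)*n_embd_gqa*(il*n_ctx
+// + kv_head), while V is stored transposed ([n_ctx, n_embd_gqa]) so the final
+// KQV matmul can later read contiguous rows.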
+
+static struct ggml_tensor * llm_build_norm(
+ struct ggml_context * ctx,
+ struct ggml_tensor * cur,
+ const llama_hparams & hparams,
+ struct ggml_tensor * mw,
+ struct ggml_tensor * mb,
+ llm_norm_type type,
+ const llm_build_cb & cb,
+ int il) {
+ switch (type) {
+ case LLM_NORM: cur = ggml_norm (ctx, cur, hparams.f_norm_eps); break;
+ case LLM_NORM_RMS: cur = ggml_rms_norm(ctx, cur, hparams.f_norm_rms_eps); break;
+ }
- params.no_alloc = true;
+ if (mw || mb) {
+ cb(cur, "norm", il);
+ }
- struct ggml_context * ctx0 = ggml_init(params);
+ if (mw) {
+ cur = ggml_mul(ctx, cur, mw);
+ if (mb) {
+ cb(cur, "norm_w", il);
+ }
+ }
- ggml_cgraph * gf = ggml_new_graph(ctx0);
+ if (mb) {
+ cur = ggml_add(ctx, cur, mb);
+ }
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
+ return cur;
+}
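+
+// Illustrative call sketch: a LLaMA-style RMS norm has a weight but no bias, so
+// mb is passed as NULL and only the intermediate "norm" callback fires:
+//
+//     cur = llm_build_norm(ctx0, inpL, hparams,
+//             model.layers[il].attn_norm, NULL,
+//             LLM_NORM_RMS, cb, il);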
+
+static struct ggml_tensor * llm_build_ffn(
+ struct ggml_context * ctx,
+ struct ggml_tensor * cur,
+ struct ggml_tensor * up,
+ struct ggml_tensor * up_b,
+ struct ggml_tensor * gate,
+ struct ggml_tensor * gate_b,
+ struct ggml_tensor * down,
+ struct ggml_tensor * down_b,
+ llm_ffn_op_type type_op,
+ llm_ffn_gate_type type_gate,
+ const llm_build_cb & cb,
+ int il) {
+ struct ggml_tensor * tmp = ggml_mul_mat(ctx, up, cur);
+ cb(tmp, "ffn_up", il);
- if (tokens) {
- struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+ if (up_b) {
+ tmp = ggml_add(ctx, tmp, up_b);
+ cb(tmp, "ffn_up_b", il);
+ }
- ggml_allocr_alloc(lctx.alloc, inp_tokens);
- if (!ggml_allocr_is_measure(lctx.alloc)) {
- memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
+ if (gate) {
+ switch (type_gate) {
+ case LLM_FFN_SEQ:
+ {
+ cur = ggml_mul_mat(ctx, gate, tmp);
+ cb(cur, "ffn_gate", il);
+ } break;
+ case LLM_FFN_PAR:
+ {
+ cur = ggml_mul_mat(ctx, gate, cur);
+ cb(cur, "ffn_gate", il);
+ } break;
}
- ggml_set_name(inp_tokens, "inp_tokens");
- inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
+ if (gate_b) {
+ cur = ggml_add(ctx, cur, gate_b);
+ cb(cur, "ffn_gate_b", il);
+ }
} else {
-#ifdef GGML_USE_MPI
- GGML_ASSERT(false && "not implemented");
-#endif
+ cur = tmp;
+ }
- inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
+ switch (type_op) {
+ case LLM_FFN_SILU:
+ {
+ cur = ggml_silu(ctx, cur);
+ cb(cur, "ffn_silu", il);
+ } break;
+ case LLM_FFN_GELU:
+ {
+ cur = ggml_gelu(ctx, cur);
+ cb(cur, "ffn_gelu", il);
+ } break;
+ case LLM_FFN_RELU:
+ {
+ cur = ggml_relu(ctx, cur);
+ cb(cur, "ffn_relu", il);
+ } break;
+ case LLM_FFN_RELU_SQR:
+ {
+ cur = ggml_relu(ctx, cur);
+ cb(cur, "ffn_relu", il);
- ggml_allocr_alloc(lctx.alloc, inpL);
- if (!ggml_allocr_is_measure(lctx.alloc)) {
- memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
- }
+ cur = ggml_sqr(ctx, cur);
+ cb(cur, "ffn_sqr(relu)", il);
+ } break;
}
- const int i_gpu_start = n_layer - n_gpu_layers;
- (void) i_gpu_start;
-
- // offload functions set the tensor output backend to GPU
- // tensors are GPU-accelerated if any input or the output has been offloaded
- //
- // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal
- // in that case ggml_cuda_assign_buffers has no effect
- offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
- offload_func_t offload_func_kq = llama_nop;
- offload_func_t offload_func_v = llama_nop;
+ if (type_gate == LLM_FFN_PAR) {
+ cur = ggml_mul(ctx, cur, tmp);
+ cb(cur, "ffn_gate_par", il);
+ }
-#ifdef GGML_USE_CUBLAS
- if (n_gpu_layers > n_layer) {
- offload_func_nr = ggml_cuda_assign_buffers_no_alloc;
+ cur = ggml_mul_mat(ctx, down, cur);
+ if (down_b) {
+ cb(cur, "ffn_down", il);
}
- if (n_gpu_layers > n_layer + 1) {
- offload_func_v = ggml_cuda_assign_buffers_no_alloc;
+
+ if (down_b) {
+ cur = ggml_add(ctx, cur, down_b);
}
- if (n_gpu_layers > n_layer + 2) {
- offload_func_kq = ggml_cuda_assign_buffers_no_alloc;
+
+ return cur;
+}
+
+// if max_alibi_bias > 0 then apply ALiBi
+static struct ggml_tensor * llm_build_kqv(
+ struct ggml_context * ctx,
+ const llama_hparams & hparams,
+ const llama_kv_cache & kv,
+ struct ggml_tensor * wo,
+ struct ggml_tensor * wo_b,
+ struct ggml_tensor * q_cur,
+ struct ggml_tensor * kq_scale,
+ struct ggml_tensor * kq_mask,
+ int64_t n_ctx,
+ int32_t n_tokens,
+ int32_t n_kv,
+ float max_alibi_bias,
+ const llm_build_cb & cb,
+ int il) {
+ const int64_t n_embd = hparams.n_embd;
+ const int64_t n_head = hparams.n_head;
+ const int64_t n_head_kv = hparams.n_head_kv;
+ const int64_t n_embd_head = hparams.n_embd_head();
+ const int64_t n_embd_gqa = hparams.n_embd_gqa();
+
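+ // permute Q to [n_embd_head, n_tokens, n_head] to match the K view below; with GQA, ggml_mul_mat broadcasts the n_head_kv K heads across the n_head Q heads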
+ struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3);
+ cb(q, "q", il);
+
+ struct ggml_tensor * k =
+ ggml_view_3d(ctx, kv.k,
+ n_embd_head, n_kv, n_head_kv,
+ ggml_element_size(kv.k)*n_embd_gqa,
+ ggml_element_size(kv.k)*n_embd_head,
+ ggml_element_size(kv.k)*n_embd_gqa*n_ctx*il);
+ cb(k, "k", il);
+
+ struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
+ cb(kq, "kq", il);
+
+ kq = ggml_scale(ctx, kq, kq_scale);
+ cb(kq, "kq_scaled", il);
+
+ if (max_alibi_bias > 0.0f) {
+ // TODO: n_head or n_head_kv
+ // TODO: K-shift is likely not working
+ // TODO: change to ggml_add
+ kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, max_alibi_bias);
+ cb(kq, "kq_scaled_alibi", il);
+ }
+
+ kq = ggml_add(ctx, kq, kq_mask);
+ cb(kq, "kq_masked", il);
+
+ kq = ggml_soft_max(ctx, kq);
+ cb(kq, "kq_soft_max", il);
+
+ // split the cached V into n_head_kv heads
+ struct ggml_tensor * v =
+ ggml_view_3d(ctx, kv.v,
+ n_kv, n_embd_head, n_head_kv,
+ ggml_element_size(kv.v)*n_ctx,
+ ggml_element_size(kv.v)*n_ctx*n_embd_head,
+ ggml_element_size(kv.v)*n_ctx*n_embd_gqa*il);
+ cb(v, "v", il);
+
+ struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
+ cb(kqv, "kqv", il);
+
+ struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3);
+ cb(kqv_merged, "kqv_merged", il);
+
+ struct ggml_tensor * cur = ggml_cont_2d(ctx, kqv_merged, n_embd, n_tokens);
+ cb(cur, "kqv_merged_cont", il);
+
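+ // name the pre-bias result only when a bias follows - otherwise the final tensor keeps the name set by the caller's callback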
+ cur = ggml_mul_mat(ctx, wo, cur);
+ if (wo_b) {
+ cb(cur, "kqv_wo", il);
+ }
+
+ if (wo_b) {
+ cur = ggml_add(ctx, cur, wo_b);
+ }
+
+ return cur;
+}
+
+struct llm_build_context {
+ const llama_model & model;
+ const llama_hparams & hparams;
+ const llama_cparams & cparams;
+ const llama_batch & batch;
+ const llama_kv_cache & kv_self;
+
+ const int64_t n_embd;
+ const int64_t n_layer;
+ const int64_t n_ctx; // user-specified context size (can be different from n_ctx_train)
+ const int64_t n_head;
+ const int64_t n_head_kv;
+ const int64_t n_embd_head;
+ const int64_t n_embd_gqa;
+
+ const float freq_base;
+ const float freq_scale;
+ const float ext_factor;
+ const float attn_factor;
+ const float beta_fast;
+ const float beta_slow;
+ const float norm_eps;
+ const float norm_rms_eps;
+
+ const int32_t n_tokens;
+ const int32_t n_kv; // size of KV cache to consider (n_kv <= n_ctx)
+ const int32_t kv_head; // index of where we store new KV data in the cache
+ const int32_t n_orig_ctx;
+
+ const bool do_rope_shift;
+
+ const llm_build_cb & cb;
+
+ llama_buffer & buf_compute;
+
+ struct ggml_context * ctx0 = nullptr;
+
+ // TODO: consider making the entire interface noexcept
+ llm_build_context(
+ llama_context & lctx,
+ const llama_batch & batch,
+ const llm_build_cb & cb,
+ bool worst_case) :
+ model (lctx.model),
+ hparams (model.hparams),
+ cparams (lctx.cparams),
+ batch (batch),
+ kv_self (lctx.kv_self),
+ n_embd (hparams.n_embd),
+ n_layer (hparams.n_layer),
+ n_ctx (cparams.n_ctx),
+ n_head (hparams.n_head),
+ n_head_kv (hparams.n_head_kv),
+ n_embd_head (hparams.n_embd_head()),
+ n_embd_gqa (hparams.n_embd_gqa()),
+ freq_base (cparams.rope_freq_base),
+ freq_scale (cparams.rope_freq_scale),
+ ext_factor (cparams.yarn_ext_factor),
+ attn_factor (cparams.yarn_attn_factor),
+ beta_fast (cparams.yarn_beta_fast),
+ beta_slow (cparams.yarn_beta_slow),
+ norm_eps (hparams.f_norm_eps),
+ norm_rms_eps (hparams.f_norm_rms_eps),
+ n_tokens (batch.n_tokens),
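+ // for the worst-case ("measure") graph, assume a full KV cache with the new tokens placed at the end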
+ n_kv (worst_case ? n_ctx : kv_self.n),
+ kv_head (worst_case ? n_ctx - n_tokens : kv_self.head),
+ n_orig_ctx (cparams.n_yarn_orig_ctx),
+ do_rope_shift (worst_case || kv_self.has_shift),
+ cb (cb),
+ buf_compute (lctx.buf_compute) {
+ GGML_ASSERT(!!kv_self.ctx);
+
+ // all initializations should be done in init()
+ }
+
+ void init() {
+ struct ggml_init_params params = {
+ /*.mem_size =*/ buf_compute.size,
+ /*.mem_buffer =*/ buf_compute.data,
+ /*.no_alloc =*/ true,
+ };
+
+ ctx0 = ggml_init(params);
}
-#endif // GGML_USE_CUBLAS
- struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- ggml_allocr_alloc(lctx.alloc, KQ_scale);
- if (!ggml_allocr_is_measure(lctx.alloc)) {
- ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
+ void free() {
+ if (ctx0) {
+ ggml_free(ctx0);
+ ctx0 = nullptr;
+ }
}
- ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
- for (int il = 0; il < n_layer; ++il) {
- ggml_format_name(inpL, "layer_inp_%d", il);
+ struct ggml_cgraph * build_llama() {
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
- offload_func_t offload_func = llama_nop;
+ GGML_ASSERT(n_embd_head == hparams.n_rot);
-#ifdef GGML_USE_CUBLAS
- if (il >= i_gpu_start) {
- offload_func = ggml_cuda_assign_buffers_no_alloc;
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
+
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "inp_embd", -1);
+
+ // inp_pos - contains the positions
+ struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ cb(inp_pos, "inp_pos", -1);
+
+ // KQ_scale
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ cb(KQ_scale, "KQ_scale", -1);
+
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
+
+ // shift the entire K-cache if needed
+ if (do_rope_shift) {
+ llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb);
}
-#endif // GGML_USE_CUBLAS
- struct ggml_tensor * inpSA = inpL;
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * inpSA = inpL;
- // norm
- {
- cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps);
- offload_func(cur);
- ggml_set_name(cur, "rms_norm_0");
+ // norm
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm, NULL,
+ LLM_NORM_RMS, cb, il);
+ cb(cur, "attn_norm", il);
+
+ // self-attention
+ {
+ // compute Q and K and RoPE them
+ struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+ cb(Qcur, "Qcur", il);
+
+ struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+ cb(Kcur, "Kcur", il);
+
+ struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+ cb(Vcur, "Vcur", il);
+
+ Qcur = ggml_rope_custom(
+ ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
+ n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Qcur, "Qcur", il);
+
+ Kcur = ggml_rope_custom(
+ ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
+ n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Kcur, "Kcur", il);
+
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
+
+ cur = llm_build_kqv(ctx0, hparams, kv_self,
+ model.layers[il].wo, NULL,
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ cb(cur, "kqv_out", il);
+ }
+
+ struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+ cb(ffn_inp, "ffn_inp", il);
+
+ // feed-forward network
+ {
+ cur = llm_build_norm(ctx0, ffn_inp, hparams,
+ model.layers[il].ffn_norm, NULL,
+ LLM_NORM_RMS, cb, il);
+ cb(cur, "ffn_norm", il);
+
+ cur = llm_build_ffn(ctx0, cur,
+ model.layers[il].ffn_up, NULL,
+ model.layers[il].ffn_gate, NULL,
+ model.layers[il].ffn_down, NULL,
+ LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+ cb(cur, "ffn_out", il);
+ }
+
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cb(cur, "l_out", il);
- // cur = cur*attn_norm(broadcasted)
- cur = ggml_mul(ctx0, cur, model.layers[il].attn_norm);
- offload_func(cur);
- ggml_set_name(cur, "attention_norm_0");
+ // input for next layer
+ inpL = cur;
}
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
- offload_func_kq(tmpk);
- ggml_set_name(tmpk, "tmpk");
+ cur = inpL;
+
+ cur = llm_build_norm(ctx0, cur, hparams,
+ model.output_norm, NULL,
+ LLM_NORM_RMS, cb, -1);
+ cb(cur, "result_norm", -1);
+
+ // lm_head
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output", -1);
+
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
+ }
+
+ struct ggml_cgraph * build_baichuan() {
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
- struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
- offload_func_kq(tmpq);
- ggml_set_name(tmpq, "tmpq");
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
- struct ggml_tensor * Kcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
- offload_func_kq(Kcur);
- ggml_set_name(Kcur, "Kcur");
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "inp_embd", -1);
- struct ggml_tensor * Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
- offload_func_kq(Qcur);
- ggml_set_name(Qcur, "Qcur");
+ // inp_pos - contains the positions
+ struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ cb(inp_pos, "inp_pos", -1);
- // store key and value to memory
+ // KQ_scale
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ cb(KQ_scale, "KQ_scale", -1);
+
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
+
+ // shift the entire K-cache if needed
+ if (do_rope_shift) {
+ llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE, n_ctx, n_embd_head, freq_base, freq_scale, cb);
+ }
+
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * inpSA = inpL;
+
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm, NULL,
+ LLM_NORM_RMS, cb, il);
+ cb(cur, "attn_norm", il);
+
+ // self-attention
{
- // compute the transposed [N, n_embd] V matrix
+ struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+ cb(Qcur, "Qcur", il);
+
+ struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+ cb(Kcur, "Kcur", il);
+
+ struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+ cb(Vcur, "Vcur", il);
+
+ switch (model.type) {
+ case MODEL_7B:
+ Qcur = ggml_rope_custom(
+ ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
+ n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ Kcur = ggml_rope_custom(
+ ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
+ n_embd_head, 0, 0, n_orig_ctx, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ break;
+ case MODEL_13B:
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens);
+ Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens);
+ break;
+ default:
+ GGML_ASSERT(false);
+ }
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
- struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
- offload_func_v(tmpv);
- ggml_set_name(tmpv, "tmpv");
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, N));
- offload_func_v(Vcur);
- ggml_set_name(Vcur, "Vcur");
+ // apply ALiBi for 13B model
+ const float max_alibi_bias = model.type == MODEL_13B ? 8.0f : -1.0f;
- struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past));
- offload_func_kq(k);
- ggml_set_name(k, "k");
+ cur = llm_build_kqv(ctx0, hparams, kv_self,
+ model.layers[il].wo, NULL,
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, cb, il);
+ cb(cur, "kqv_out", il);
+ }
- struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa,
- ( n_ctx)*ggml_element_size(kv_self.v),
- (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v));
- offload_func_v(v);
- ggml_set_name(v, "v");
+ struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+ cb(ffn_inp, "ffn_inp", il);
- // important: storing RoPE-ed version of K in the KV cache!
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
+ // feed-forward network
+ {
+ cur = llm_build_norm(ctx0, ffn_inp, hparams,
+ model.layers[il].ffn_norm, NULL,
+ LLM_NORM_RMS, cb, il);
+ cb(cur, "ffn_norm", il);
+
+ cur = llm_build_ffn(ctx0, cur,
+ model.layers[il].ffn_up, NULL,
+ model.layers[il].ffn_gate, NULL,
+ model.layers[il].ffn_down, NULL,
+ LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+ cb(cur, "ffn_out", il);
}
- struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
- offload_func_kq(Q);
- ggml_set_name(Q, "Q");
-
- struct ggml_tensor * K =
- ggml_view_3d(ctx0, kv_self.k,
- n_embd_head, n_past + N, n_head_kv,
- ggml_element_size(kv_self.k)*n_embd_gqa,
- ggml_element_size(kv_self.k)*n_embd_head,
- ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il);
- offload_func_kq(K);
- ggml_set_name(K, "K");
-
- // K * Q
- struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
- offload_func_kq(KQ);
- ggml_set_name(KQ, "KQ");
-
- // KQ_scaled = KQ / sqrt(n_embd_head)
- // KQ_scaled shape [n_past + N, N, n_head, 1]
- struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
- offload_func_kq(KQ_scaled);
- ggml_set_name(KQ_scaled, "KQ_scaled");
-
- // KQ_masked = mask_past(KQ_scaled)
- struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
- offload_func_kq(KQ_masked);
- ggml_set_name(KQ_masked, "KQ_masked");
-
- // KQ = soft_max(KQ_masked)
- struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
- offload_func_v(KQ_soft_max);
- ggml_set_name(KQ_soft_max, "KQ_soft_max");
-
- // split cached V into n_head heads
- struct ggml_tensor * V =
- ggml_view_3d(ctx0, kv_self.v,
- n_past + N, n_embd_head, n_head_kv,
- ggml_element_size(kv_self.v)*n_ctx,
- ggml_element_size(kv_self.v)*n_ctx*n_embd_head,
- ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il);
- offload_func_v(V);
- ggml_set_name(V, "V");
-
-#if 1
- struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
- offload_func_v(KQV);
- ggml_set_name(KQV, "KQV");
-#else
- // make V contiguous in memory to speed up the matmul, however we waste time on the copy
- // on M1 this is faster for the perplexity computation, but ~5% slower for the single-token generation
- // is there a better way?
- struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd_head, n_head));
- struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max);
-#endif
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
+ }
+
+ cur = inpL;
+
+ cur = llm_build_norm(ctx0, cur, hparams,
+ model.output_norm, NULL,
+ LLM_NORM_RMS, cb, -1);
+ cb(cur, "result_norm", -1);
+
+ // lm_head
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output", -1);
+
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
+ }
- // KQV_merged = KQV.permute(0, 2, 1, 3)
- struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
- offload_func_v(KQV_merged);
- ggml_set_name(KQV_merged, "KQV_merged");
+ struct ggml_cgraph * build_falcon() {
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
- // cur = KQV_merged.contiguous().view(n_embd, N)
- cur = ggml_cpy(ctx0,
- KQV_merged,
- ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
- offload_func_v(cur);
- ggml_set_name(cur, "KQV_merged_contiguous");
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
- // projection (no bias)
- cur = ggml_mul_mat(ctx0,
- model.layers[il].wo,
- cur);
- offload_func(cur);
- ggml_set_name(cur, "result_wo");
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "inp_embd", -1);
+
+ // inp_pos - contains the positions
+ struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ cb(inp_pos, "inp_pos", -1);
+
+ // KQ_scale
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ cb(KQ_scale, "KQ_scale", -1);
+
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
+
+ // shift the entire K-cache if needed
+ if (do_rope_shift) {
+ llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
}
- struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
- offload_func(inpFF);
- ggml_set_name(inpFF, "inpFF");
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * attn_norm;
- // feed-forward network
- {
- // norm
+ attn_norm = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm,
+ model.layers[il].attn_norm_b,
+ LLM_NORM, cb, il);
+ cb(attn_norm, "attn_norm", il);
+
+ // self-attention
{
- cur = ggml_rms_norm(ctx0, inpFF, norm_rms_eps);
- offload_func(cur);
- ggml_set_name(cur, "rms_norm_1");
-
- // cur = cur*ffn_norm(broadcasted)
- cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm);
- offload_func(cur);
- ggml_set_name(cur, "ffn_norm");
+ if (model.layers[il].attn_norm_2) {
+ // Falcon-40B
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm_2,
+ model.layers[il].attn_norm_2_b,
+ LLM_NORM, cb, il);
+ cb(cur, "attn_norm_2", il);
+ } else {
+ cur = attn_norm;
+ }
+
+ cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
+ cb(cur, "wqkv", il);
+
+ struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
+
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
+
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+ Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+
+ // RoPE mode = 2 selects the NeoX-style rotation
+ Qcur = ggml_rope_custom(
+ ctx0, Qcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
+ freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Qcur, "Qcur", il);
+
+ Kcur = ggml_rope_custom(
+ ctx0, Kcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
+ freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Kcur, "Kcur", il);
+
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
+
+ cur = llm_build_kqv(ctx0, hparams, kv_self,
+ model.layers[il].wo, NULL,
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ cb(cur, "kqv_out", il);
}
- struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
- model.layers[il].w3,
- cur);
- offload_func(tmp);
- ggml_set_name(tmp, "result_w3");
+ struct ggml_tensor * ffn_inp = cur;
- cur = ggml_mul_mat(ctx0,
- model.layers[il].w1,
- cur);
- offload_func(cur);
- ggml_set_name(cur, "result_w1");
+ // feed forward
+ {
+ cur = llm_build_ffn(ctx0, attn_norm, // !! use the attn norm, not the result
+ model.layers[il].ffn_up, NULL,
+ NULL, NULL,
+ model.layers[il].ffn_down, NULL,
+ LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
+ cb(cur, "ffn_out", il);
+ }
- // SILU activation
- cur = ggml_silu(ctx0, cur);
- offload_func(cur);
- ggml_set_name(cur, "silu");
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cb(cur, "l_out", il);
- cur = ggml_mul(ctx0, cur, tmp);
- offload_func(cur);
- ggml_set_name(cur, "silu_x_result_w3");
+ cur = ggml_add(ctx0, cur, inpL);
+ cb(cur, "l_out", il);
- cur = ggml_mul_mat(ctx0,
- model.layers[il].w2,
- cur);
- offload_func(cur);
- ggml_set_name(cur, "result_w2");
+ // input for next layer
+ inpL = cur;
}
- cur = ggml_add(ctx0, cur, inpFF);
- offload_func(cur);
- ggml_set_name(cur, "inpFF_+_result_w2");
+ cur = inpL;
- // input for next layer
- inpL = cur;
- }
+ // norm
+ cur = llm_build_norm(ctx0, cur, hparams,
+ model.output_norm,
+ model.output_norm_b,
+ LLM_NORM, cb, -1);
+ cb(cur, "result_norm", -1);
- cur = inpL;
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output", -1);
- // norm
- {
- cur = ggml_rms_norm(ctx0, cur, norm_rms_eps);
- offload_func_nr(cur);
- ggml_set_name(cur, "rms_norm_2");
+ ggml_build_forward_expand(gf, cur);
- // cur = cur*norm(broadcasted)
- cur = ggml_mul(ctx0, cur, model.output_norm);
- // offload_func_nr(cur); // TODO CPU + GPU mirrored backend
- ggml_set_name(cur, "result_norm");
+ return gf;
}
- // lm_head
- cur = ggml_mul_mat(ctx0, model.output, cur);
- ggml_set_name(cur, "result_output");
+ struct ggml_cgraph * build_starcoder() {
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
- ggml_build_forward_expand(gf, cur);
+ struct ggml_tensor * cur;
+ struct ggml_tensor * pos;
+ struct ggml_tensor * inpL;
- ggml_free(ctx0);
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "inp_embd", -1);
- return gf;
-}
+ // inp_pos - contains the positions
+ struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ cb(inp_pos, "inp_pos", -1);
+ // KQ_scale
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ cb(KQ_scale, "KQ_scale", -1);
-static struct ggml_cgraph * llm_build_baichaun(
- llama_context & lctx,
- const llama_token * tokens,
- const float * embd,
- int n_tokens,
- int n_past) {
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
- GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT
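+ // StarCoder uses learned absolute position embeddings, added to the token embeddings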
+ pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
+ cb(pos, "pos_embd", -1);
- const int N = n_tokens;
+ inpL = ggml_add(ctx0, inpL, pos);
+ cb(inpL, "inpL", -1);
- const auto & model = lctx.model;
- const auto & hparams = model.hparams;
+ for (int il = 0; il < n_layer; ++il) {
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm,
+ model.layers[il].attn_norm_b,
+ LLM_NORM, cb, il);
+ cb(cur, "attn_norm", il);
- const auto & kv_self = lctx.kv_self;
+ // self-attention
+ {
+ cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
+ cb(cur, "wqkv", il);
- GGML_ASSERT(!!kv_self.ctx);
+ cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
+ cb(cur, "bqkv", il);
- const int64_t n_embd = hparams.n_embd;
- const int64_t n_layer = hparams.n_layer;
- const int64_t n_ctx = hparams.n_ctx;
- const int64_t n_head = hparams.n_head;
- const int64_t n_head_kv = hparams.n_head_kv;
- const int64_t n_embd_head = hparams.n_embd_head();
- const int64_t n_embd_gqa = hparams.n_embd_gqa();
+ struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- GGML_ASSERT(n_embd_head == hparams.n_rot);
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
- const float freq_base = hparams.rope_freq_base;
- const float freq_scale = hparams.rope_freq_scale;
- const float norm_rms_eps = hparams.f_norm_rms_eps;
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- const int n_gpu_layers = model.n_gpu_layers;
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- auto & buf_compute = lctx.buf_compute;
+ cur = llm_build_kqv(ctx0, hparams, kv_self,
+ model.layers[il].wo, model.layers[il].bo,
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ cb(cur, "kqv_out", il);
+ }
- struct ggml_init_params params = {
- /*.mem_size =*/ buf_compute.size,
- /*.mem_buffer =*/ buf_compute.data,
- /*.no_alloc =*/ false,
- };
+ // add the input
+ struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
+ cb(ffn_inp, "ffn_inp", il);
- params.no_alloc = true;
+ // FF
+ {
+ cur = llm_build_norm(ctx0, ffn_inp, hparams,
+ model.layers[il].ffn_norm,
+ model.layers[il].ffn_norm_b,
+ LLM_NORM, cb, il);
+ cb(cur, "ffn_norm", il);
+
+ cur = llm_build_ffn(ctx0, cur,
+ model.layers[il].ffn_up, model.layers[il].ffn_up_b,
+ NULL, NULL,
+ model.layers[il].ffn_down, model.layers[il].ffn_down_b,
+ LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
+ cb(cur, "ffn_out", il);
+ }
- struct ggml_context * ctx0 = ggml_init(params);
+ inpL = ggml_add(ctx0, cur, ffn_inp);
+ cb(inpL, "l_out", il);
+ }
- ggml_cgraph * gf = ggml_new_graph(ctx0);
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.output_norm,
+ model.output_norm_b,
+ LLM_NORM, cb, -1);
+ cb(cur, "result_norm", -1);
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output", -1);
+
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
+ }
+
+ struct ggml_cgraph * build_persimmon() {
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+
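+ // Persimmon applies RoPE to only the first half of each head (the qrot/krot views below); the qpass/kpass halves go through unrotated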
+ const int64_t n_rot = n_embd_head / 2;
+
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
- if (tokens) {
- struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "imp_embd", -1);
- ggml_allocr_alloc(lctx.alloc, inp_tokens);
- if (!ggml_allocr_is_measure(lctx.alloc)) {
- memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
+ struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ cb(inp_pos, "inp_pos", -1);
+
+ // KQ_scale
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ cb(KQ_scale, "KQ_scale", -1);
+
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
+
+ if (do_rope_shift) {
+ llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
}
- ggml_set_name(inp_tokens, "inp_tokens");
- inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
- } else {
-#ifdef GGML_USE_MPI
- GGML_ASSERT(false && "not implemented");
-#endif
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * residual = inpL;
+
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm,
+ model.layers[il].attn_norm_b,
+ LLM_NORM, cb, il);
+ cb(cur, "attn_norm", il);
- inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
+ // self attention
+ {
+ cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
+ cb(cur, "wqkv", il);
+
+ cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
+ cb(cur, "bqkv", il);
+
+ // split qkv
+ GGML_ASSERT(n_head_kv == n_head);
+
+ struct ggml_tensor * tmpqkv = ggml_reshape_4d(ctx0, cur, n_embd_head, 3, n_head, n_tokens);
+ cb(tmpqkv, "tmpqkv", il);
+
+ struct ggml_tensor * tmpqkv_perm = ggml_cont(ctx0, ggml_permute(ctx0, tmpqkv, 0, 3, 1, 2));
+ cb(tmpqkv_perm, "tmpqkv", il);
+
+ struct ggml_tensor * tmpq = ggml_view_3d(
+ ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
+ ggml_element_size(tmpqkv_perm) * n_embd_head,
+ ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
+ 0
+ );
+ cb(tmpq, "tmpq", il);
+
+ struct ggml_tensor * tmpk = ggml_view_3d(
+ ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
+ ggml_element_size(tmpqkv_perm) * n_embd_head,
+ ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
+ ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens
+ );
+ cb(tmpk, "tmpk", il);
+
+ // Q/K Layernorm
+ tmpq = llm_build_norm(ctx0, tmpq, hparams,
+ model.layers[il].attn_q_norm,
+ model.layers[il].attn_q_norm_b,
+ LLM_NORM, cb, il);
+ cb(tmpq, "tmpq", il);
+
+ tmpk = llm_build_norm(ctx0, tmpk, hparams,
+ model.layers[il].attn_k_norm,
+ model.layers[il].attn_k_norm_b,
+ LLM_NORM, cb, il);
+ cb(tmpk, "tmpk", il);
+
+ // RoPE the first n_rot dims of q/k, pass the other half through unchanged, then concat.
+ struct ggml_tensor * qrot = ggml_view_3d(
+ ctx0, tmpq, n_rot, n_head, n_tokens,
+ ggml_element_size(tmpq) * n_embd_head,
+ ggml_element_size(tmpq) * n_embd_head * n_head,
+ 0
+ );
+ cb(qrot, "qrot", il);
+
+ struct ggml_tensor * krot = ggml_view_3d(
+ ctx0, tmpk, n_rot, n_head, n_tokens,
+ ggml_element_size(tmpk) * n_embd_head,
+ ggml_element_size(tmpk) * n_embd_head * n_head,
+ 0
+ );
+ cb(krot, "krot", il);
+
+ // get the second half of tmpq, i.e. tmpq[n_rot:, :, :]
+ struct ggml_tensor * qpass = ggml_view_3d(
+ ctx0, tmpq, n_rot, n_head, n_tokens,
+ ggml_element_size(tmpq) * n_embd_head,
+ ggml_element_size(tmpq) * n_embd_head * n_head,
+ ggml_element_size(tmpq) * n_rot
+ );
+ cb(qpass, "qpass", il);
+
+ struct ggml_tensor * kpass = ggml_view_3d(
+ ctx0, tmpk, n_rot, n_head, n_tokens,
+ ggml_element_size(tmpk) * n_embd_head,
+ ggml_element_size(tmpk) * n_embd_head * n_head,
+ ggml_element_size(tmpk) * n_rot
+ );
+ cb(kpass, "kpass", il);
+
+ struct ggml_tensor * qrotated = ggml_rope_custom(
+ ctx0, qrot, inp_pos, n_rot, 2, 0, n_orig_ctx,
+ freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(qrotated, "qrotated", il);
+
+ struct ggml_tensor * krotated = ggml_rope_custom(
+ ctx0, krot, inp_pos, n_rot, 2, 0, n_orig_ctx,
+ freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(krotated, "krotated", il);
+
+ // ggml currently only supports concatenation on dim=2
+ // so we need to permute qrot, qpass, concat, then permute back.
+ qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3));
+ cb(qrotated, "qrotated", il);
+
+ krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3));
+ cb(krotated, "krotated", il);
+
+ qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3));
+ cb(qpass, "qpass", il);
+
+ kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3));
+ cb(kpass, "kpass", il);
+
+ struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass);
+ cb(Qcur, "Qcur", il);
+
+ struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass);
+ cb(Kcur, "Kcur", il);
+
+ struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 1, 2, 0, 3));
+ cb(Q, "Q", il);
+
+ Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3));
+ cb(Kcur, "Kcur", il);
+
+ struct ggml_tensor * Vcur = ggml_view_3d(
+ ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
+ ggml_element_size(tmpqkv_perm) * n_embd_head,
+ ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
+ ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens * 2
+ );
+ cb(Vcur, "Vcur", il);
+
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
+
+ // TODO: not tested, could be broken
+ cur = llm_build_kqv(ctx0, hparams, kv_self,
+ model.layers[il].wo, model.layers[il].bo,
+ Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ cb(cur, "kqv_out", il);
+ }
- ggml_allocr_alloc(lctx.alloc, inpL);
- if (!ggml_allocr_is_measure(lctx.alloc)) {
- memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
+ struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur);
+ cb(ffn_inp, "ffn_inp", il);
+
+ // feed-forward network
+ {
+ cur = llm_build_norm(ctx0, ffn_inp, hparams,
+ model.layers[il].ffn_norm,
+ model.layers[il].ffn_norm_b,
+ LLM_NORM, cb, il);
+ cb(cur, "ffn_norm", il);
+
+ cur = llm_build_ffn(ctx0, cur,
+ model.layers[il].ffn_up, model.layers[il].ffn_up_b,
+ NULL, NULL,
+ model.layers[il].ffn_down, model.layers[il].ffn_down_b,
+ LLM_FFN_RELU_SQR, LLM_FFN_SEQ, cb, il);
+ cb(cur, "ffn_out", il);
+ }
+
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cb(cur, "l_out", il);
+
+ inpL = cur;
}
- }
- const int i_gpu_start = n_layer - n_gpu_layers;
- (void) i_gpu_start;
+ cur = inpL;
- // offload functions set the tensor output backend to GPU
- // tensors are GPU-accelerated if any input or the output has been offloaded
- //
- // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal
- // in that case ggml_cuda_assign_buffers has no effect
- offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
- offload_func_t offload_func_kq = llama_nop;
- offload_func_t offload_func_v = llama_nop;
+ cur = llm_build_norm(ctx0, cur, hparams,
+ model.output_norm,
+ model.output_norm_b,
+ LLM_NORM, cb, -1);
+ cb(cur, "result_norm", -1);
-#ifdef GGML_USE_CUBLAS
- if (n_gpu_layers > n_layer) {
- offload_func_nr = ggml_cuda_assign_buffers_no_alloc;
- }
- if (n_gpu_layers > n_layer + 1) {
- offload_func_v = ggml_cuda_assign_buffers_no_alloc;
- }
- if (n_gpu_layers > n_layer + 2) {
- offload_func_kq = ggml_cuda_assign_buffers_no_alloc;
- }
-#endif // GGML_USE_CUBLAS
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output", -1);
- struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- ggml_allocr_alloc(lctx.alloc, KQ_scale);
- if (!ggml_allocr_is_measure(lctx.alloc)) {
- ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
}
- ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
- for (int il = 0; il < n_layer; ++il) {
- ggml_format_name(inpL, "layer_inp_%d", il);
+ struct ggml_cgraph * build_refact() {
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
- offload_func_t offload_func = llama_nop;
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
-#ifdef GGML_USE_CUBLAS
- if (il >= i_gpu_start) {
- offload_func = ggml_cuda_assign_buffers_no_alloc;
- }
-#endif // GGML_USE_CUBLAS
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "inp_embd", -1);
- struct ggml_tensor * inpSA = inpL;
+ // KQ_scale
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ cb(KQ_scale, "KQ_scale", -1);
- // norm
- {
- cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps);
- offload_func(cur);
- ggml_set_name(cur, "rms_norm_0");
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
- // cur = cur*attn_norm(broadcasted)
- cur = ggml_mul(ctx0, cur, model.layers[il].attn_norm);
- offload_func(cur);
- ggml_set_name(cur, "attention_norm_0");
- }
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * inpSA = inpL;
- // self-attention
- {
- // compute Q and K and RoPE them
- struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
- offload_func_kq(tmpk);
- ggml_set_name(tmpk, "tmpk");
-
- struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
- offload_func_kq(tmpq);
- ggml_set_name(tmpq, "tmpq");
-
- struct ggml_tensor * Kcur;
- struct ggml_tensor * Qcur;
- switch (model.type) {
- case MODEL_7B:
- Kcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
- Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
- break;
- case MODEL_13B:
- Kcur = ggml_reshape_3d(ctx0, tmpk, n_embd/n_head, n_head, N);
- Qcur = ggml_reshape_3d(ctx0, tmpq, n_embd/n_head, n_head, N);
- break;
- default:
- GGML_ASSERT(false);
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm, NULL,
+ LLM_NORM_RMS, cb, il);
+ cb(cur, "attn_norm", il);
+
+ // self-attention
+ {
+ struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+ cb(Qcur, "Qcur", il);
+
+ struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+ cb(Kcur, "Kcur", il);
+
+ struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+ cb(Vcur, "Vcur", il);
+
+ Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+ cb(Kcur, "Kcur", il);
+
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+ cb(Qcur, "Qcur", il);
+
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
+
+ cur = llm_build_kqv(ctx0, hparams, kv_self,
+ model.layers[il].wo, NULL,
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il);
+ cb(cur, "kqv_out", il);
}
- offload_func_kq(Kcur);
- ggml_set_name(Kcur, "Kcur");
+ struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+ cb(ffn_inp, "ffn_inp", il);
+
+ // feed-forward network
+ {
+ cur = llm_build_norm(ctx0, ffn_inp, hparams,
+ model.layers[il].ffn_norm, NULL,
+ LLM_NORM_RMS, cb, il);
+ cb(cur, "ffn_norm", il);
+
+ cur = llm_build_ffn(ctx0, cur,
+ model.layers[il].ffn_up, NULL,
+ model.layers[il].ffn_gate, NULL,
+ model.layers[il].ffn_down, NULL,
+ LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+ cb(cur, "ffn_out", il);
+ }
+
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
+ }
+
+ cur = inpL;
+
+ cur = llm_build_norm(ctx0, cur, hparams,
+ model.output_norm, NULL,
+ LLM_NORM_RMS, cb, -1);
+ cb(cur, "result_norm", -1);
+
+ // lm_head
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output", -1);
+
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
+ }
+
+ struct ggml_cgraph * build_bloom() {
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
- offload_func_kq(Qcur);
- ggml_set_name(Qcur, "Qcur");
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "inp_embd", -1);
- // store key and value to memory
+ // KQ_scale
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ cb(KQ_scale, "KQ_scale", -1);
+
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
+
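+ // BLOOM normalizes the token embeddings before the first layer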
+ inpL = llm_build_norm(ctx0, inpL, hparams,
+ model.tok_norm,
+ model.tok_norm_b,
+ LLM_NORM, cb, -1);
+ cb(inpL, "inp_norm", -1);
+
+ for (int il = 0; il < n_layer; ++il) {
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm,
+ model.layers[il].attn_norm_b,
+ LLM_NORM, cb, il);
+ cb(cur, "attn_norm", il);
+
+ // self-attention
{
- // compute the transposed [N, n_embd] V matrix
+ cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
+ cb(cur, "wqkv", il);
- struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
- offload_func_v(tmpv);
- ggml_set_name(tmpv, "tmpv");
+ cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
+ cb(cur, "bqkv", il);
- struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, N));
- offload_func_v(Vcur);
- ggml_set_name(Vcur, "Vcur");
+ struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
- struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past));
- offload_func_kq(k);
- ggml_set_name(k, "k");
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
- struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa,
- ( n_ctx)*ggml_element_size(kv_self.v),
- (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v));
- offload_func_v(v);
- ggml_set_name(v, "v");
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
- // important: storing RoPE-ed version of K in the KV cache!
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
- }
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
- offload_func_kq(Q);
- ggml_set_name(Q, "Q");
-
- struct ggml_tensor * K =
- ggml_view_3d(ctx0, kv_self.k,
- n_embd_head, n_past + N, n_head_kv,
- ggml_element_size(kv_self.k)*n_embd_gqa,
- ggml_element_size(kv_self.k)*n_embd_head,
- ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il);
- offload_func_kq(K);
- ggml_set_name(K, "K");
-
- // K * Q
- struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
- offload_func_kq(KQ);
- ggml_set_name(KQ, "KQ");
-
- // KQ_scaled = KQ / sqrt(n_embd_head)
- // KQ_scaled shape [n_past + N, N, n_head, 1]
- struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
- offload_func_kq(KQ_scaled);
- ggml_set_name(KQ_scaled, "KQ_scaled");
-
- struct ggml_tensor * KQ_masked;
- struct ggml_tensor * KQ_scaled_alibi;
-
- switch (model.type) {
- case MODEL_7B:
- KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
- break;
- case MODEL_13B:
- KQ_scaled_alibi =ggml_alibi(ctx0, KQ_scaled, n_past, n_head, 8);
- ggml_set_name(KQ_scaled_alibi, "KQ_scaled_alibi");
- KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled_alibi, n_past);
- break;
- default:
- GGML_ASSERT(false);
+ cur = llm_build_kqv(ctx0, hparams, kv_self,
+ model.layers[il].wo, model.layers[il].bo,
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il);
+ cb(cur, "kqv_out", il);
}
- // KQ_masked = mask_past(KQ_scaled)
- // struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
- // struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled_alibi, n_past);
- // offload_func_kq(KQ_masked);
- // ggml_set_name(KQ_masked, "KQ_masked");
-
- // KQ = soft_max(KQ_masked)
- struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
- offload_func_v(KQ_soft_max);
- ggml_set_name(KQ_soft_max, "KQ_soft_max");
-
- // split cached V into n_head heads
- struct ggml_tensor * V =
- ggml_view_3d(ctx0, kv_self.v,
- n_past + N, n_embd_head, n_head_kv,
- ggml_element_size(kv_self.v)*n_ctx,
- ggml_element_size(kv_self.v)*n_ctx*n_embd_head,
- ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il);
- offload_func_v(V);
- ggml_set_name(V, "V");
-
-#if 1
- struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
- offload_func_v(KQV);
- ggml_set_name(KQV, "KQV");
-#else
- // make V contiguous in memory to speed up the matmul, however we waste time on the copy
- // on M1 this is faster for the perplexity computation, but ~5% slower for the single-token generation
- // is there a better way?
- struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd_head, n_head));
- struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max);
-#endif
- // KQV_merged = KQV.permute(0, 2, 1, 3)
- struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
- offload_func_v(KQV_merged);
- ggml_set_name(KQV_merged, "KQV_merged");
+ // add the input
+ struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
+ cb(ffn_inp, "ffn_inp", il);
- // cur = KQV_merged.contiguous().view(n_embd, N)
- cur = ggml_cpy(ctx0,
- KQV_merged,
- ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
- offload_func_v(cur);
- ggml_set_name(cur, "KQV_merged_contiguous");
+ // FF
+ {
+ cur = llm_build_norm(ctx0, ffn_inp, hparams,
+ model.layers[il].ffn_norm,
+ model.layers[il].ffn_norm_b,
+ LLM_NORM, cb, il);
+ cb(cur, "ffn_norm", il);
+
+ cur = llm_build_ffn(ctx0, cur,
+ model.layers[il].ffn_up, model.layers[il].ffn_up_b,
+ NULL, NULL,
+ model.layers[il].ffn_down, model.layers[il].ffn_down_b,
+ LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
+ cb(cur, "ffn_out", il);
+ }
- // projection (no bias)
- cur = ggml_mul_mat(ctx0,
- model.layers[il].wo,
- cur);
- offload_func(cur);
- ggml_set_name(cur, "result_wo");
+ inpL = ggml_add(ctx0, cur, ffn_inp);
+ cb(inpL, "l_out", il);
}
- struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
- offload_func(inpFF);
- ggml_set_name(inpFF, "inpFF");
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.output_norm,
+ model.output_norm_b,
+ LLM_NORM, cb, -1);
+ cb(cur, "result_norm", -1);
- // feed-forward network
- {
- // norm
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output", -1);
+
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
+ }
+
+ struct ggml_cgraph * build_mpt() {
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
+
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "inp_embd", -1);
+
+ // KQ_scale
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ cb(KQ_scale, "KQ_scale", -1);
+
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
+
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * attn_norm;
+
+ attn_norm = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm,
+ NULL,
+ LLM_NORM, cb, il);
+ cb(attn_norm, "attn_norm", il);
+
+ // self-attention
{
- cur = ggml_rms_norm(ctx0, inpFF, norm_rms_eps);
- offload_func(cur);
- ggml_set_name(cur, "rms_norm_1");
-
- // cur = cur*ffn_norm(broadcasted)
- cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm);
- offload_func(cur);
- ggml_set_name(cur, "ffn_norm");
+ cur = attn_norm;
+
+ cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
+ cb(cur, "wqkv", il);
+
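+ // optionally clamp the QKV projection (MPT's clip_qkv setting)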
+ if (hparams.f_clamp_kqv > 0.0f) {
+ cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
+ cb(cur, "wqkv_clamped", il);
+ }
+
+ struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
+
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
+
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
+
+ cur = llm_build_kqv(ctx0, hparams, kv_self,
+ model.layers[il].wo, NULL,
+ Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, cb, il);
+ cb(cur, "kqv_out", il);
}
- struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
- model.layers[il].w3,
- cur);
- offload_func(tmp);
- ggml_set_name(tmp, "result_w3");
+ // add the input
+ struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
+ cb(ffn_inp, "ffn_inp", il);
- cur = ggml_mul_mat(ctx0,
- model.layers[il].w1,
- cur);
- offload_func(cur);
- ggml_set_name(cur, "result_w1");
+ // feed forward
+ {
+ cur = llm_build_norm(ctx0, ffn_inp, hparams,
+ model.layers[il].ffn_norm,
+ NULL,
+ LLM_NORM, cb, il);
+ cb(cur, "ffn_norm", il);
+
+ cur = llm_build_ffn(ctx0, cur,
+ model.layers[il].ffn_up, NULL,
+ NULL, NULL,
+ model.layers[il].ffn_down, NULL,
+ LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
+ cb(cur, "ffn_out", il);
+ }
- // SILU activation
- cur = ggml_silu(ctx0, cur);
- offload_func(cur);
- ggml_set_name(cur, "silu");
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
+ }
- cur = ggml_mul(ctx0, cur, tmp);
- offload_func(cur);
- ggml_set_name(cur, "silu_x_result_w3");
+ cur = inpL;
- cur = ggml_mul_mat(ctx0,
- model.layers[il].w2,
- cur);
- offload_func(cur);
- ggml_set_name(cur, "result_w2");
+ cur = llm_build_norm(ctx0, cur, hparams,
+ model.output_norm,
+ NULL,
+ LLM_NORM, cb, -1);
+ cb(cur, "result_norm", -1);
+
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output", -1);
+
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
+ }
+};
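+
+// typical usage (sketch; llama_build_graph below is expected to drive this):
+//
+//   llm_build_context llm(lctx, batch, cb, worst_case);
+//   llm.init();
+//   struct ggml_cgraph * gf = llm.build_llama(); // or build_falcon(), build_mpt(), ...
+//   llm.free();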
+
+//
+// tensor offloading helpers
+//
+// TODO: will be removed with backend v2
+
+enum llm_offload_func_e {
+ OFFLOAD_FUNC_NOP,
+ OFFLOAD_FUNC,
+ OFFLOAD_FUNC_KQ,
+ OFFLOAD_FUNC_V,
+ OFFLOAD_FUNC_NR,
+ OFFLOAD_FUNC_EMB,
+ OFFLOAD_FUNC_OUT,
+};
+
+// TODO: will be removed with backend v2
+struct llm_offload_trie {
+ struct node {
+ ~node() {
+ for (int i = 0; i < 256; ++i) {
+ if (children[i]) {
+ delete children[i];
+ }
+ }
}
- cur = ggml_add(ctx0, cur, inpFF);
- offload_func(cur);
- ggml_set_name(cur, "inpFF_+_result_w2");
+ node * children[256] = { nullptr };
+ llm_offload_func_e func = OFFLOAD_FUNC_NOP;
+ };
- // input for next layer
- inpL = cur;
+ llm_offload_trie() {
+ root = new node;
}
- cur = inpL;
+ llm_offload_trie(const std::unordered_map<const char *, llm_offload_func_e> & map) {
+ root = new node;
- // norm
- {
- cur = ggml_rms_norm(ctx0, cur, norm_rms_eps);
- offload_func_nr(cur);
- ggml_set_name(cur, "rms_norm_2");
+ for (const auto & kv : map) {
+ add(kv.first, kv.second);
+ }
+ }
- // cur = cur*norm(broadcasted)
- cur = ggml_mul(ctx0, cur, model.output_norm);
- // offload_func_nr(cur); // TODO CPU + GPU mirrored backend
- ggml_set_name(cur, "result_norm");
+ ~llm_offload_trie() {
+ delete root;
}
- // lm_head
- cur = ggml_mul_mat(ctx0, model.output, cur);
- ggml_set_name(cur, "result_output");
+ void add(const char * name, llm_offload_func_e func) {
+ node * cur = root;
- ggml_build_forward_expand(gf, cur);
+ for (int i = 0; ; ++i) {
+ const uint8_t c = name[i];
- ggml_free(ctx0);
+ if (!c) {
+ break;
+ }
- return gf;
-}
+ if (!cur->children[c]) {
+ cur->children[c] = new node;
+ }
-static struct ggml_cgraph * llm_build_falcon(
- llama_context & lctx,
- const llama_token * tokens,
- const float * embd,
- int n_tokens,
- int n_past) {
+ cur = cur->children[c];
+ }
- GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT
+ cur->func = func;
+ }
- const int N = n_tokens;
+ llm_offload_func_e find(const char * name) const {
+ const node * cur = root;
- const auto & model = lctx.model;
- const auto & hparams = model.hparams;
+ for (int i = 0; ; ++i) {
+ const uint8_t c = name[i];
- const auto & kv_self = lctx.kv_self;
+ if (!c) {
+ break;
+ }
- GGML_ASSERT(!!kv_self.ctx);
+ if (!cur->children[c]) {
+ return OFFLOAD_FUNC_NOP;
+ }
- const int64_t n_embd = hparams.n_embd;
- const int64_t n_layer = hparams.n_layer;
- const int64_t n_ctx = hparams.n_ctx;
- const int64_t n_head = hparams.n_head;
- const int64_t n_head_kv = hparams.n_head_kv;
- const int64_t n_embd_head = hparams.n_embd_head();
- const int64_t n_embd_gqa = hparams.n_embd_gqa();
+ cur = cur->children[c];
+ }
+
+ return cur->func;
+ }
- GGML_ASSERT(n_embd_head == hparams.n_rot);
+ node * root = nullptr;
+};
- const float freq_base = hparams.rope_freq_base;
- const float freq_scale = hparams.rope_freq_scale;
- const float norm_eps = hparams.f_norm_eps;
+// TODO: will be removed with backend v2
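+// note: keyed by string literal pointers - the map is only iterated to build the trie below, never used for lookup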
+static const std::unordered_map<const char *, llm_offload_func_e> k_offload_map = {
+ //{ "inp_tokens", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel
+ //{ "inp_embd", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel
+ { "pos_embd", OFFLOAD_FUNC_NR },
+
+ { "inp_pos", OFFLOAD_FUNC_KQ }, // this is often used for KQ ops (e.g. rope)
+ { "KQ_scale", OFFLOAD_FUNC_KQ },
+ { "KQ_mask", OFFLOAD_FUNC_KQ },
+ { "K_shift", OFFLOAD_FUNC_KQ },
+ { "K_shifted", OFFLOAD_FUNC_KQ },
+
+ { "inp_norm", OFFLOAD_FUNC_NR },
+ { "inp_norm_w", OFFLOAD_FUNC_NR },
+ { "inp_norm_wb", OFFLOAD_FUNC_NR },
+
+ { "norm", OFFLOAD_FUNC },
+ { "norm_w", OFFLOAD_FUNC },
+ { "norm_wb", OFFLOAD_FUNC },
+
+ { "attn_norm", OFFLOAD_FUNC },
+ { "attn_norm_2", OFFLOAD_FUNC },
+
+ { "wqkv", OFFLOAD_FUNC_KQ },
+ { "bqkv", OFFLOAD_FUNC_KQ },
+ { "wqkv_clamped", OFFLOAD_FUNC_KQ },
+
+ { "tmpk", OFFLOAD_FUNC_KQ },
+ { "tmpq", OFFLOAD_FUNC_KQ },
+ { "tmpv", OFFLOAD_FUNC_V },
+ { "Kcur", OFFLOAD_FUNC_KQ },
+ { "Qcur", OFFLOAD_FUNC_KQ },
+ { "Vcur", OFFLOAD_FUNC_V },
+
+ { "krot", OFFLOAD_FUNC_KQ },
+ { "qrot", OFFLOAD_FUNC_KQ },
+ { "kpass", OFFLOAD_FUNC_KQ },
+ { "qpass", OFFLOAD_FUNC_KQ },
+ { "krotated", OFFLOAD_FUNC_KQ },
+ { "qrotated", OFFLOAD_FUNC_KQ },
+
+ { "q", OFFLOAD_FUNC_KQ },
+ { "k", OFFLOAD_FUNC_KQ },
+ { "kq", OFFLOAD_FUNC_KQ },
+ { "kq_scaled", OFFLOAD_FUNC_KQ },
+ { "kq_scaled_alibi", OFFLOAD_FUNC_KQ },
+ { "kq_masked", OFFLOAD_FUNC_KQ },
+ { "kq_soft_max", OFFLOAD_FUNC_V },
+ { "v", OFFLOAD_FUNC_V },
+ { "kqv", OFFLOAD_FUNC_V },
+ { "kqv_merged", OFFLOAD_FUNC_V },
+ { "kqv_merged_cont", OFFLOAD_FUNC_V },
+ { "kqv_wo", OFFLOAD_FUNC_V },
+ { "kqv_out", OFFLOAD_FUNC_V },
+
+ { "ffn_inp", OFFLOAD_FUNC },
+ { "ffn_norm", OFFLOAD_FUNC },
+
+ { "ffn_up", OFFLOAD_FUNC },
+ { "ffn_up_b", OFFLOAD_FUNC },
+ { "ffn_gate", OFFLOAD_FUNC },
+ { "ffn_gate_b", OFFLOAD_FUNC },
+ { "ffn_gate_par", OFFLOAD_FUNC },
+ { "ffn_down", OFFLOAD_FUNC },
+ { "ffn_down_b", OFFLOAD_FUNC },
+ { "ffn_out", OFFLOAD_FUNC },
+
+ { "ffn_silu", OFFLOAD_FUNC },
+ { "ffn_gelu", OFFLOAD_FUNC },
+ { "ffn_relu", OFFLOAD_FUNC },
+ { "ffn_sqr(relu)", OFFLOAD_FUNC },
+
+ { "l_out", OFFLOAD_FUNC },
+
+ { "result_norm", OFFLOAD_FUNC_EMB },
+ { "result_output", OFFLOAD_FUNC_OUT },
+};
- const int n_gpu_layers = model.n_gpu_layers;
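+// lookups use the base callback name (e.g. "ffn_out"), before the "-%d" layer suffix is appended to the tensor name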
+static llm_offload_trie k_offload_func_trie(k_offload_map);
- auto & buf_compute = lctx.buf_compute;
+static struct ggml_cgraph * llama_build_graph(
+ llama_context & lctx,
+ const llama_batch & batch) {
+ const auto & model = lctx.model;
- struct ggml_init_params params = {
- /*.mem_size =*/ buf_compute.size,
- /*.mem_buffer =*/ buf_compute.data,
- /*.no_alloc =*/ false,
- };
+ // check if we should build the worst-case graph (for memory measurement)
+ const bool worst_case = ggml_allocr_is_measure(lctx.alloc);
- params.no_alloc = true;
+ // keep track of the input tensors that have already been allocated
+ bool alloc_inp_tokens = false;
+ bool alloc_inp_embd = false;
+ bool alloc_inp_pos = false;
+ bool alloc_inp_KQ_scale = false;
+ bool alloc_inp_KQ_mask = false;
+ bool alloc_inp_K_shift = false;
- struct ggml_context * ctx0 = ggml_init(params);
+#ifdef GGML_USE_CUBLAS
+ const bool do_offload = true;
+#else
+ const bool do_offload = true; // TODO: set to false after finishing refactoring
+#endif
- ggml_cgraph * gf = ggml_new_graph(ctx0);
+ int n_non_view = 0; // number of non-view tensors that have been processed by the callback
- struct ggml_tensor * cur;
- struct ggml_tensor * inpL;
+ // this callback allows us to apply custom logic to each tensor (e.g. ggml-alloc, offloading, etc.)
+ // TODO: will be removed with backend v2
+ llm_build_cb cb = [&](struct ggml_tensor * cur, const char * name, int il) {
+ if (il >= 0) {
+ ggml_format_name(cur, "%s-%d", name, il);
+ } else {
+ ggml_set_name(cur, name);
+ }
- if (tokens) {
- struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+ //
+ // allocate input tensors and set input data
+ //
+ // TODO: will be removed with backend v2
- ggml_allocr_alloc(lctx.alloc, inp_tokens);
- if (!ggml_allocr_is_measure(lctx.alloc)) {
- memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
- }
- ggml_set_name(inp_tokens, "inp_tokens");
+ if (!alloc_inp_tokens && strcmp(name, "inp_tokens") == 0) {
+ ggml_allocr_alloc(lctx.alloc, cur);
- inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
- } else {
-#ifdef GGML_USE_MPI
- GGML_ASSERT(false && "not implemented");
-#endif
+ if (!ggml_allocr_is_measure(lctx.alloc) && batch.token) {
+ const int64_t n_tokens = cur->ne[0];
- inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
+ memcpy(cur->data, batch.token, n_tokens*ggml_element_size(cur));
+ }
- ggml_allocr_alloc(lctx.alloc, inpL);
- if (!ggml_allocr_is_measure(lctx.alloc)) {
- memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
+ alloc_inp_tokens = true;
}
- }
- const int i_gpu_start = n_layer - n_gpu_layers;
- (void) i_gpu_start;
+ if (!alloc_inp_embd && strcmp(name, "inp_embd") == 0) {
+ ggml_allocr_alloc(lctx.alloc, cur);
- // offload functions set the tensor output backend to GPU
- // tensors are GPU-accelerated if any input or the output has been offloaded
- //
- // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal
- // in that case ggml_cuda_assign_buffers has no effect
- offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
- offload_func_t offload_func_kq = llama_nop;
- offload_func_t offload_func_v = llama_nop;
+ if (!ggml_allocr_is_measure(lctx.alloc) && batch.embd) {
+ const int64_t n_embd = cur->ne[0];
+ const int64_t n_tokens = cur->ne[1];
-#ifdef GGML_USE_CUBLAS
- if (n_gpu_layers > n_layer) {
- offload_func_nr = ggml_cuda_assign_buffers_no_alloc;
- }
- if (n_gpu_layers > n_layer + 1) {
- offload_func_v = ggml_cuda_assign_buffers_no_alloc;
- }
- if (n_gpu_layers > n_layer + 2) {
- offload_func_kq = ggml_cuda_assign_buffers_no_alloc;
- }
-#endif // GGML_USE_CUBLAS
+ memcpy(cur->data, batch.embd, n_tokens*n_embd*ggml_element_size(cur));
+ }
- struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- ggml_allocr_alloc(lctx.alloc, KQ_scale);
- if (!ggml_allocr_is_measure(lctx.alloc)) {
- ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
- }
- ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
+ alloc_inp_embd = true;
+ }
- for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * attn_norm;
+ if (!alloc_inp_pos && strcmp(name, "inp_pos") == 0) {
+ ggml_allocr_alloc(lctx.alloc, cur);
- offload_func_t offload_func = llama_nop;
+ if (!ggml_allocr_is_measure(lctx.alloc) && batch.pos) {
+ const int64_t n_tokens = cur->ne[0];
-#ifdef GGML_USE_CUBLAS
- if (il >= i_gpu_start) {
- offload_func = ggml_cuda_assign_buffers_no_alloc;
- }
-#endif // GGML_USE_CUBLAS
+ int32_t * data = (int32_t *) cur->data;
- // self-attention
- // TODO: refactor into common function (shared with LLaMA)
- {
- attn_norm = ggml_norm(ctx0, inpL, norm_eps);
- offload_func(attn_norm);
-
- attn_norm = ggml_add(ctx0,
- ggml_mul(ctx0, attn_norm, model.layers[il].attn_norm),
- model.layers[il].attn_norm_b);
- offload_func(attn_norm->src[0]);
- offload_func(attn_norm);
-
- if (model.layers[il].attn_norm_2) { // Falcon-40B
- cur = ggml_norm(ctx0, inpL, norm_eps);
- offload_func(cur);
-
- cur = ggml_add(ctx0,
- ggml_mul(ctx0, cur, model.layers[il].attn_norm_2),
- model.layers[il].attn_norm_2_b);
- offload_func(cur->src[0]);
- offload_func(cur);
- } else { // Falcon 7B
- cur = attn_norm;
+ for (int i = 0; i < n_tokens; ++i) {
+ data[i] = batch.pos[i];
+ }
}
- // compute QKV
-
- cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
- offload_func_kq(cur);
-
- // Note that the strides for Kcur, Vcur are set up so that the
- // resulting views are misaligned with the tensor's storage
- // (by applying the K/V offset we shift the tensor's original
- // view to stick out behind the viewed QKV tensor's allocated
- // memory, so to say). This is ok because no actual accesses
- // happen to that out-of-range memory, but it can require some
- // trickery when trying to accurately dump these views for
- // debugging.
-
- const size_t wsize = ggml_type_size(cur->type);
-
- // TODO: these 2 ggml_conts are technically not needed, but we add them until CUDA support for
- // non-contiguous views is added for the rope operator
- struct ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_3d(
- ctx0, cur, n_embd_head, n_head, N,
- wsize * n_embd_head,
- wsize * n_embd_head * (n_head + 2 * n_head_kv),
- 0));
- offload_func_kq(tmpq);
-
- struct ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_3d(
- ctx0, cur, n_embd_head, n_head_kv, N,
- wsize * n_embd_head,
- wsize * n_embd_head * (n_head + 2 * n_head_kv),
- wsize * n_embd_head * n_head));
- offload_func_kq(tmpk);
-
- struct ggml_tensor * tmpv = ggml_view_3d(
- ctx0, cur, n_embd_head, n_head_kv, N,
- wsize * n_embd_head,
- wsize * n_embd_head * (n_head + 2 * n_head_kv),
- wsize * n_embd_head * (n_head + n_head_kv));
- offload_func_v(tmpv);
-
- // using mode = 2 for neox mode
- struct ggml_tensor * Qcur = ggml_rope_custom_inplace(ctx0, tmpq, n_past, n_embd_head, 2, 0, freq_base, freq_scale);
- offload_func_kq(Qcur);
- struct ggml_tensor * Kcur = ggml_rope_custom_inplace(ctx0, tmpk, n_past, n_embd_head, 2, 0, freq_base, freq_scale);
- offload_func_kq(Kcur);
+ alloc_inp_pos = true;
+ }
- {
- struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, ggml_cont(ctx0, tmpv), n_embd_gqa, N));
- offload_func_v(Vcur);
- offload_func_v(Vcur->src[0]->src[0]);
- ggml_set_name(Vcur, "Vcur");
-
- struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past));
- offload_func_kq(k);
- ggml_set_name(k, "k");
-
- struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa,
- ( n_ctx)*ggml_element_size(kv_self.v),
- (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v));
- offload_func_v(v);
-
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
- ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
+ if (!alloc_inp_KQ_scale && strcmp(name, "KQ_scale") == 0) {
+ ggml_allocr_alloc(lctx.alloc, cur);
+
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ const int64_t n_embd_head = model.hparams.n_embd_head();
+ ggml_set_f32(cur, 1.0f/sqrtf(float(n_embd_head)));
}
- struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
- offload_func_kq(Q);
- ggml_set_name(Q, "Q");
+ alloc_inp_KQ_scale = true;
+ }
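+        // (for reference: with n_embd = 4096 and n_head = 32, n_embd_head = 128 and this
+        // scale evaluates to 1/sqrt(128) ~= 0.0884)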
+
+ if (!alloc_inp_KQ_mask && strcmp(name, "KQ_mask") == 0) {
+ ggml_allocr_alloc(lctx.alloc, cur);
+
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ const int64_t n_kv = cur->ne[0];
+ const int64_t n_tokens = cur->ne[1];
- struct ggml_tensor * K =
- ggml_view_3d(ctx0, kv_self.k,
- n_embd_head, n_past + N, n_head_kv,
- ggml_element_size(kv_self.k)*n_embd_gqa,
- ggml_element_size(kv_self.k)*n_embd_head,
- ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il);
- offload_func_kq(K);
- ggml_set_name(K, "K");
+ float * data = (float *) cur->data;
+ memset(data, 0, ggml_nbytes(cur));
- struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
- offload_func_kq(KQ);
- ggml_set_name(KQ, "KQ");
+ for (int h = 0; h < 1; ++h) {
+ for (int j = 0; j < n_tokens; ++j) {
+ const llama_pos pos = batch.pos[j];
+ const llama_seq_id seq_id = batch.seq_id[j][0];
- struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
- offload_func_kq(KQ_scaled);
- ggml_set_name(KQ_scaled, "KQ_scaled");
+ for (int i = 0; i < n_kv; ++i) {
+ if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
+ data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY;
+ }
+ }
+ }
+ }
+ }
- struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
- offload_func_kq(KQ_masked);
- ggml_set_name(KQ_masked, "KQ_masked");
+ alloc_inp_KQ_mask = true;
+ }
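+        // The mask filled above is laid out row-major as [n_tokens x n_kv]: entry (j, i) is
+        // set to -INFINITY when cache cell i does not belong to token j's sequence or holds
+        // a later position. For a fresh 3-token prompt at positions 0..2 (n_kv = 3) this
+        // gives the usual causal pattern:
+        //
+        //     0  -inf -inf
+        //     0   0   -inf
+        //     0   0    0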
- struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
- offload_func_v(KQ_soft_max);
- ggml_set_name(KQ_soft_max, "KQ_soft_max");
+ if (!alloc_inp_K_shift && strcmp(name, "K_shift") == 0) {
+ ggml_allocr_alloc(lctx.alloc, cur);
- struct ggml_tensor * V =
- ggml_view_3d(ctx0, kv_self.v,
- n_past + N, n_embd_head, n_head_kv,
- ggml_element_size(kv_self.v)*n_ctx,
- ggml_element_size(kv_self.v)*n_ctx*n_embd_head,
- ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il);
- offload_func_v(V);
- ggml_set_name(V, "V");
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ const int64_t n_ctx = cur->ne[0];
- struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
- offload_func_v(KQV);
- ggml_set_name(KQV, "KQV");
+ int32_t * data = (int32_t *) cur->data;
- struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
- offload_func_v(KQV_merged);
- ggml_set_name(KQV_merged, "KQV_merged");
+ for (int i = 0; i < n_ctx; ++i) {
+ data[i] = lctx.kv_self.cells[i].delta;
+ }
+ }
- cur = ggml_cpy(ctx0, KQV_merged, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
- offload_func_v(cur);
- ggml_set_name(cur, "KQV_merged_contiguous");
+ alloc_inp_K_shift = true;
+ }
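+        // K_shift holds one position delta per cache cell; the K_shifted rope op in the
+        // graph uses it to re-rotate the cached keys after cells have been moved, e.g. a
+        // cell shifted back by 2 positions carries delta = -2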
- cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur);
- offload_func(cur);
- ggml_set_name(cur, "result_wo");
+ // view tensors are not processed further
+ if (cur->view_src != nullptr) {
+ return;
}
- struct ggml_tensor * attn_out = cur;
+ if (cur->op != GGML_OP_NONE) {
+ n_non_view++;
+ }
- // feed forward
- {
- struct ggml_tensor * inpFF = attn_norm;
+ //
+ // offload layers
+ //
+ // TODO: will be removed with backend v2
- cur = ggml_mul_mat(ctx0, model.layers[il].w3, inpFF);
- offload_func(cur);
+//#define LLAMA_OFFLOAD_DEBUG
- cur = ggml_gelu(ctx0, cur);
- offload_func(cur);
- cur = ggml_mul_mat(ctx0, model.layers[il].w2, cur);
- offload_func(cur);
+ if (!do_offload) {
+ return;
}
- cur = ggml_add(ctx0, cur, attn_out);
- offload_func(cur);
- cur = ggml_add(ctx0, cur, inpL);
- offload_func(cur);
+ const int n_layer = model.hparams.n_layer;
- // input for next layer
- inpL = cur;
- }
+ const int n_gpu_layers = model.n_gpu_layers;
+ const int i_gpu_start = n_layer - n_gpu_layers;
- cur = inpL;
+ // should we offload the final norm? yes if we are not computing embeddings
+ const bool offload_emb = lctx.embedding.empty();
- // norm
- {
- cur = ggml_norm(ctx0, cur, norm_eps);
- offload_func_nr(cur);
+ static const std::unordered_map<llm_offload_func_e, std::string, std::hash<int>> k_offload_func_name = {
+ { OFFLOAD_FUNC_NOP, "CPU" },
+ { OFFLOAD_FUNC_OUT, "CPU" },
+#ifdef GGML_USE_CUBLAS
+ { OFFLOAD_FUNC, "GPU (CUDA)" },
+ { OFFLOAD_FUNC_KQ, "GPU (CUDA) KQ" },
+ { OFFLOAD_FUNC_V, "GPU (CUDA) V" },
+ { OFFLOAD_FUNC_NR, "GPU (CUDA) NR" },
+ { OFFLOAD_FUNC_EMB, "GPU (CUDA) EMB" },
+#else
+ { OFFLOAD_FUNC, "CPU" },
+ { OFFLOAD_FUNC_KQ, "CPU" },
+ { OFFLOAD_FUNC_V, "CPU" },
+ { OFFLOAD_FUNC_NR, "CPU" },
+ { OFFLOAD_FUNC_EMB, "CPU" },
+#endif // GGML_USE_CUBLAS
+ };
- cur = ggml_add(ctx0,
- ggml_mul(ctx0, cur, model.output_norm),
- model.output_norm_b);
- ggml_set_name(cur, "result_norm");
- }
+ // check the global map for what offload function to use for this tensor
+ llm_offload_func_e func_e = k_offload_func_trie.find(name);
+
+ if (func_e == OFFLOAD_FUNC_NOP) {
+#ifdef LLAMA_OFFLOAD_DEBUG
+ // if a tensor hasn't been offloaded, we warn the user
+ if (worst_case) {
+ LLAMA_LOG_WARN("%s: %32s: not offloaded (ref: %s)\n", __func__,
+ cur->name, "https://github.com/ggerganov/llama.cpp/pull/3837");
+ }
+#endif
+
+ return;
+ }
- cur = ggml_mul_mat(ctx0, model.output, cur);
- ggml_set_name(cur, "result_output");
+        // demote the offload function if the provided n_gpu_layers does not cover this tensor
+ switch (func_e) {
+ case OFFLOAD_FUNC_NOP:
+ case OFFLOAD_FUNC_OUT:
+ break;
+ case OFFLOAD_FUNC:
+ if (n_gpu_layers < n_layer) {
+ if (il < i_gpu_start) {
+ func_e = OFFLOAD_FUNC_NOP;
+ }
+ }
+ break;
+ case OFFLOAD_FUNC_NR:
+ if (n_gpu_layers <= n_layer + 0) {
+ func_e = OFFLOAD_FUNC_NOP;
+ }
+ break;
+ case OFFLOAD_FUNC_V:
+ if (n_gpu_layers <= n_layer + 1) {
+ func_e = OFFLOAD_FUNC_NOP;
+ }
+ break;
+ case OFFLOAD_FUNC_KQ:
+ if (n_gpu_layers <= n_layer + 2) {
+ func_e = OFFLOAD_FUNC_NOP;
+ }
+ break;
+ case OFFLOAD_FUNC_EMB:
+ if (!offload_emb || n_gpu_layers < n_layer) {
+ func_e = OFFLOAD_FUNC_NOP;
+ }
+ break;
+ default: GGML_ASSERT(false);
+ }
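+        // In effect n_gpu_layers is a tiered threshold: up to n_layer it offloads (some of)
+        // the repeating layers, n_layer + 1 also offloads the non-repeating tensors,
+        // n_layer + 2 adds the V-related tensors, and n_layer + 3 adds the KQ-related
+        // tensors (this is what llama_decode_internal below treats as fully offloaded)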
- ggml_build_forward_expand(gf, cur);
+ offload_func_t func = ggml_offload_nop;
- ggml_free(ctx0);
+    // on non-CUDA builds (e.g. Metal) the GPU offload function is a no-op
+#ifdef GGML_USE_CUBLAS
+ static offload_func_t ggml_offload_gpu = ggml_cuda_assign_buffers_no_alloc;
+#else
+ static offload_func_t ggml_offload_gpu = ggml_offload_nop;
+#endif
- return gf;
-}
+ switch (func_e) {
+ case OFFLOAD_FUNC_NOP:
+ case OFFLOAD_FUNC_OUT: func = ggml_offload_nop; break;
+ case OFFLOAD_FUNC:
+ case OFFLOAD_FUNC_KQ:
+ case OFFLOAD_FUNC_V:
+ case OFFLOAD_FUNC_NR:
+ case OFFLOAD_FUNC_EMB: func = ggml_offload_gpu; break;
+ default: GGML_ASSERT(false);
+ }
-static struct ggml_cgraph * llama_build_graph(
- llama_context & lctx,
- const llama_token * tokens,
- const float * embd,
- int n_tokens,
- int n_past) {
- const auto & model = lctx.model;
+ // apply offload function to the tensor
+ func(cur);
+
+#ifdef LLAMA_OFFLOAD_DEBUG
+ if (worst_case) {
+ LLAMA_LOG_INFO("%s: %32s: %s\n", __func__, cur->name, k_offload_func_name.at(func_e).c_str());
+ }
+#endif
+ };
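+    // Typical use inside the per-arch build functions (sketch, not verbatim code): after
+    // creating a tensor, the builder invokes the callback with the canonical name and the
+    // layer index, e.g.
+    //
+    //     cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur);
+    //     cb(cur, "kqv_wo", il); // named "kqv_wo-<il>", then allocated/offloaded
+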
struct ggml_cgraph * result = NULL;
+ struct llm_build_context llm(lctx, batch, cb, worst_case);
+
+ llm.init();
+
switch (model.arch) {
case LLM_ARCH_LLAMA:
{
- result = llm_build_llama(lctx, tokens, embd, n_tokens, n_past);
+ result = llm.build_llama();
} break;
case LLM_ARCH_BAICHUAN:
{
- result = llm_build_baichaun(lctx, tokens, embd, n_tokens, n_past);
+ result = llm.build_baichuan();
} break;
case LLM_ARCH_FALCON:
{
- result = llm_build_falcon(lctx, tokens, embd, n_tokens, n_past);
+ result = llm.build_falcon();
+ } break;
+ case LLM_ARCH_STARCODER:
+ {
+ result = llm.build_starcoder();
+ } break;
+ case LLM_ARCH_PERSIMMON:
+ {
+ result = llm.build_persimmon();
+ } break;
+ case LLM_ARCH_REFACT:
+ {
+ result = llm.build_refact();
+ } break;
+ case LLM_ARCH_BLOOM:
+ {
+ result = llm.build_bloom();
+ } break;
+ case LLM_ARCH_MPT:
+ {
+ result = llm.build_mpt();
} break;
default:
GGML_ASSERT(false);
- };
+ }
+
+ llm.free();
+
+ if (worst_case) {
+ int n_non_view_total = 0;
+
+ for (int i = 0; i < result->n_nodes; ++i) {
+ if (result->nodes[i]->view_src == nullptr) {
+ n_non_view_total++;
+ }
+ }
+
+ LLAMA_LOG_INFO("%s: non-view tensors processed: %d/%d\n", __func__, n_non_view, n_non_view_total);
+
+ if (n_non_view != n_non_view_total) {
+ LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__);
+ LLAMA_LOG_WARN("%s: not all non-view tensors have been processed with a callback\n", __func__);
+ LLAMA_LOG_WARN("%s: this can indicate an inefficiency in the graph implementation\n", __func__);
+ LLAMA_LOG_WARN("%s: build with LLAMA_OFFLOAD_DEBUG for more info\n", __func__);
+ LLAMA_LOG_WARN("%s: ref: https://github.com/ggerganov/llama.cpp/pull/3837\n", __func__);
+ LLAMA_LOG_WARN("%s: ****************************************************************\n", __func__);
+ }
+ }
return result;
}
-// evaluate the transformer
+// decode a batch of tokens by evaluating the transformer
//
// - lctx: llama context
-// - tokens: new batch of tokens to process
-// - embd embeddings input
-// - n_tokens number of tokens
-// - n_past: the context size so far
-// - n_threads: number of threads to use
+// - batch: batch to evaluate
//
-static bool llama_eval_internal(
+// return 0 on success
+// return positive int on warning
+// return negative int on error
+//
+static int llama_decode_internal(
llama_context & lctx,
- const llama_token * tokens,
- const float * embd,
- int n_tokens,
- int n_past,
- int n_threads,
- const char * cgraph_fname) {
+ llama_batch batch) {
+ const uint32_t n_tokens = batch.n_tokens;
+
+ if (n_tokens == 0) {
+        LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
+ return -1;
+ }
+
+ const auto & model = lctx.model;
+ const auto & hparams = model.hparams;
+ const auto & cparams = lctx.cparams;
- GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT
+ const auto n_batch = cparams.n_batch;
- GGML_ASSERT(n_tokens > 0);
- GGML_ASSERT(n_past >= 0);
- // TODO: keep the values of n_batch and n_ctx
- // GGML_ASSERT(n_tokens <= n_batch);
- // GGML_ASSERT(n_past + n_tokens <= n_ctx);
+ GGML_ASSERT(n_tokens <= n_batch);
+
+ int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
+ GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
const int64_t t_start_us = ggml_time_us();
#ifdef GGML_USE_MPI
- ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
+ // TODO: needs fix after #3228
+ GGML_ASSERT(false && "not implemented");
+ //ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
#endif
GGML_ASSERT(n_threads > 0);
- const int N = n_tokens;
-
- const auto & model = lctx.model;
- const auto & hparams = model.hparams;
-
- const auto & kv_self = lctx.kv_self;
+ auto & kv_self = lctx.kv_self;
GGML_ASSERT(!!kv_self.ctx);
const int64_t n_embd = hparams.n_embd;
const int64_t n_vocab = hparams.n_vocab;
+    // helpers for a smoother batch API transition
+ // after deprecating the llama_eval calls, these will be removed
+ std::vector<llama_pos> pos;
+
+ std::vector<int32_t> n_seq_id;
+ std::vector<llama_seq_id *> seq_id_arr;
+ std::vector<std::vector<llama_seq_id>> seq_id;
+
+ if (batch.pos == nullptr) {
+ pos.resize(n_tokens);
+ for (uint32_t i = 0; i < n_tokens; i++) {
+ pos[i] = batch.all_pos_0 + i*batch.all_pos_1;
+ }
+
+ batch.pos = pos.data();
+ }
+
+ if (batch.seq_id == nullptr) {
+ n_seq_id.resize(n_tokens);
+ seq_id.resize(n_tokens);
+ seq_id_arr.resize(n_tokens);
+ for (uint32_t i = 0; i < n_tokens; i++) {
+ n_seq_id[i] = 1;
+ seq_id[i].resize(1);
+ seq_id[i][0] = batch.all_seq_id;
+ seq_id_arr[i] = seq_id[i].data();
+ }
+
+ batch.n_seq_id = n_seq_id.data();
+ batch.seq_id = seq_id_arr.data();
+ }
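+    // e.g. batches produced by llama_batch_get_one() (the legacy llama_eval path) arrive
+    // with pos == nullptr and seq_id == nullptr, so positions default to
+    // all_pos_0 + i*all_pos_1 and every token is assigned to sequence all_seq_id
+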
+
+ if (!llama_kv_cache_find_slot(kv_self, batch)) {
+ return 1;
+ }
+
+    // a heuristic to avoid attending the full cache if it is not yet fully utilized
+    // after enough generations, the benefit from this heuristic disappears
+    // if we start defragmenting the cache, this heuristic will become more important
+ //kv_self.n = std::max(32, GGML_PAD(llama_kv_cache_cell_max(kv_self), 32)); // TODO: this might be better for CUDA?
+ kv_self.n = std::min((int32_t) cparams.n_ctx, std::max(32, llama_kv_cache_cell_max(kv_self)));
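+    // e.g. with n_ctx = 4096 and only 100 occupied cells, attention spans just 100 cells;
+    // with fewer than 32 occupied cells it still spans 32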
+
+ //printf("kv_self.n = %d\n", kv_self.n);
+
ggml_allocr_reset(lctx.alloc);
- ggml_cgraph * gf = llama_build_graph(lctx, tokens, embd, n_tokens, n_past);
+ ggml_cgraph * gf = llama_build_graph(lctx, batch);
ggml_allocr_alloc_graph(lctx.alloc, gf);
+ struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
+ struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
+
+ GGML_ASSERT(strcmp(res->name, "result_output") == 0);
+ GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
+
#ifdef GGML_USE_CUBLAS
for (int i = 0; i < gf->n_leafs; i++) {
ggml_tensor * node = gf->leafs[i];
if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
ggml_cuda_assign_scratch_offset(node, (char*)node->data - (char *) lctx.buf_alloc.data);
+ ggml_cuda_copy_to_device(node);
}
}
    for (int i = 0; i < gf->n_nodes; i++) {
        ggml_tensor * node = gf->nodes[i];
        if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
            ggml_cuda_assign_scratch_offset(node, (char*)node->data - (char *) lctx.buf_alloc.data);
        }
    }
+
+ // HACK: ggml-alloc may change the tensor backend when reusing a parent, so force output to be on the CPU here if needed
+ if (!lctx.embedding.empty()) {
+ embeddings->backend = GGML_BACKEND_CPU;
+ }
+ res->backend = GGML_BACKEND_CPU;
#endif
// LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
// TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
// we still need some threads to process all non-mul_mat ops, but not too many to avoid
// interfering with the BLAS calls. need a better solution
- if (N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
+ if (n_tokens >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
n_threads = std::min(4, n_threads);
}
- struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
- struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
+    // If all tensors can be run on the GPU, using more than 1 thread is detrimental.
+ const bool full_offload_supported =
+ model.arch == LLM_ARCH_LLAMA ||
+ model.arch == LLM_ARCH_BAICHUAN ||
+ model.arch == LLM_ARCH_FALCON ||
+ model.arch == LLM_ARCH_REFACT ||
+ model.arch == LLM_ARCH_MPT;
- GGML_ASSERT(strcmp(res->name, "result_output") == 0);
- GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
+ const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
+ if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
+ n_threads = 1;
+ }
#if GGML_USE_MPI
const int64_t n_layer = hparams.n_layer;
ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer);
#endif
- // update kv token count
- lctx.kv_self.n = n_past + N;
+ // update the kv ring buffer
+ {
+ if (kv_self.has_shift) {
+ kv_self.has_shift = false;
+ for (uint32_t i = 0; i < kv_self.size; ++i) {
+ kv_self.cells[i].delta = 0;
+ }
+ }
+
+ kv_self.head += n_tokens;
- if (cgraph_fname) {
- ggml_graph_export(gf, cgraph_fname);
+ // Ensure kv cache head points to a valid index.
+ if (kv_self.head >= kv_self.size) {
+ kv_self.head = 0;
+ }
}
#ifdef GGML_PERF
//}
// extract logits
+ // TODO: do not compute and extract logits if only embeddings are needed
+ // need to update the graphs to skip "result_output"
{
auto & logits_out = lctx.logits;
- if (lctx.logits_all) {
- logits_out.resize(n_vocab * N);
- memcpy(logits_out.data(), (float *) ggml_get_data(res), sizeof(float)*n_vocab*N);
+ if (batch.logits) {
+ logits_out.resize(n_vocab * n_tokens);
+ for (uint32_t i = 0; i < n_tokens; i++) {
+ if (batch.logits[i] == 0) {
+ continue;
+ }
+ memcpy(logits_out.data() + (n_vocab*i), (float *) ggml_get_data(res) + (n_vocab*i), sizeof(float)*n_vocab);
+ }
+ } else if (lctx.logits_all) {
+ logits_out.resize(n_vocab * n_tokens);
+ memcpy(logits_out.data(), (float *) ggml_get_data(res), sizeof(float)*n_vocab*n_tokens);
} else {
- // return result for just the last token
logits_out.resize(n_vocab);
- memcpy(logits_out.data(), (float *) ggml_get_data(res) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+ memcpy(logits_out.data(), (float *) ggml_get_data(res) + (n_vocab*(n_tokens - 1)), sizeof(float)*n_vocab);
}
}
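+    // e.g. a caller that only needs the final token's logits for an n-token batch can set
+    // batch.logits[i] = 0 for i < n-1 and batch.logits[n-1] = 1, so only that single
+    // n_vocab-wide row is copied out
+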
auto & embedding_out = lctx.embedding;
embedding_out.resize(n_embd);
- memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
+ memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(n_tokens - 1)), sizeof(float)*n_embd);
}
// measure the performance only for the single-token evals
- if (N == 1) {
+ if (n_tokens == 1) {
lctx.t_eval_us += ggml_time_us() - t_start_us;
lctx.n_eval++;
}
- else if (N > 1) {
+ else if (n_tokens > 1) {
lctx.t_p_eval_us += ggml_time_us() - t_start_us;
- lctx.n_p_eval += N;
+ lctx.n_p_eval += n_tokens;
}
- return true;
+    // get a more accurate load time upon first eval
+ // TODO: fix this
+ if (!lctx.has_evaluated_once) {
+ lctx.t_load_us = ggml_time_us() - lctx.t_start_us;
+ lctx.has_evaluated_once = true;
+ }
+
+ return 0;
}
//
return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_BYTE;
}
-static uint8_t llama_token_to_byte(const llama_vocab & vocab, llama_token id) {
+static bool llama_is_user_defined_token(const llama_vocab& vocab, llama_token id) {
+ return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_USER_DEFINED;
+}
+
+static uint8_t llama_token_to_byte(const llama_vocab& vocab, llama_token id) {
GGML_ASSERT(llama_is_byte_token(vocab, id));
const auto& token_data = vocab.id_to_token.at(id);
- auto buf = token_data.text.substr(3, 2);
- return strtol(buf.c_str(), NULL, 16);
+ switch (llama_vocab_get_type(vocab)) {
+ case LLAMA_VOCAB_TYPE_SPM: {
+ auto buf = token_data.text.substr(3, 2);
+ return strtol(buf.c_str(), NULL, 16);
+ }
+ case LLAMA_VOCAB_TYPE_BPE: {
+ GGML_ASSERT(false);
+ return unicode_to_bytes_bpe(token_data.text);
+ }
+ default:
+ GGML_ASSERT(false);
+ }
}
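+// For SPM vocabularies, byte tokens are spelled "<0xXX>": e.g. token text "<0x41>" yields
+// substr(3, 2) == "41", and strtol("41", NULL, 16) == 0x41 == 'A'.
+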
static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
- char buf[7];
- int result = snprintf(buf, sizeof(buf), "<0x%02X>", ch);
- GGML_ASSERT(0 <= result && result < 7);
- return vocab.token_to_id.at(buf);
+ static const char * hex = "0123456789ABCDEF";
+ switch (llama_vocab_get_type(vocab)) {
+ case LLAMA_VOCAB_TYPE_SPM: {
+ const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
+ return vocab.token_to_id.at(buf);
+ }
+ case LLAMA_VOCAB_TYPE_BPE: {
+ return vocab.token_to_id.at(bytes_to_unicode_bpe(ch));
+ }
+ default:
+ GGML_ASSERT(false);
+ }
}
static void llama_escape_whitespace(std::string & text) {
llm_symbol sym;
size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
sym.text = word.c_str() + offset;
- sym.n = 1;
sym.n = char_len;
offset += sym.n;
sym.prev = index - 1;
std::string byte_str(1, *j);
auto token_multibyte = vocab.token_to_id.find(byte_str);
if (token_multibyte == vocab.token_to_id.end()) {
- try {
- llama_token token_byte = llama_byte_to_token(vocab, *j);
- output.push_back(token_byte);
- } catch (const std::out_of_range & err) {
- fprintf(stderr,"ERROR: byte not found in vocab: '%s'\n", byte_str.c_str());
- }
- } else {
- output.push_back((*token_multibyte).second);
+ throw std::runtime_error("ERROR: byte not found in vocab");
}
+ output.push_back((*token_multibyte).second);
}
} else {
output.push_back((*token).second);
work_queue.push(bigram);
}
- // probably not 100% correct
- static std::vector<std::string> bpe_gpt2_preprocess(const std::string & text) {
- std::vector<std::string> words;
+ std::vector<std::string> bpe_gpt2_preprocess(const std::string & text) {
+ std::vector<std::string> bpe_words;
+ std::vector<std::string> bpe_encoded_words;
+
+ std::string token = "";
+ // GPT2 system regex: 's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+
+ bool collecting_numeric = false;
+ bool collecting_letter = false;
+ bool collecting_special = false;
+ bool collecting_whitespace_lookahead = false;
+ bool collecting = false;
+
+ std::vector<std::string> text_utf;
+ text_utf.reserve(text.size());
+ bpe_words.reserve(text.size());
+ bpe_encoded_words.reserve(text.size());
+
+ auto cps = codepoints_from_utf8(text);
+ for (size_t i = 0; i < cps.size(); ++i)
+ text_utf.emplace_back(codepoint_to_utf8(cps[i]));
+
+ for (int i = 0; i < (int)text_utf.size(); i++) {
+ const std::string & utf_char = text_utf[i];
+ bool split_condition = false;
+ int bytes_remain = text_utf.size() - i;
+ // forward backward lookups
+ const std::string & utf_char_next = (i + 1 < (int)text_utf.size()) ? text_utf[i + 1] : "";
+ const std::string & utf_char_next_next = (i + 2 < (int)text_utf.size()) ? text_utf[i + 2] : "";
+
+ // handling contractions
+ if (!split_condition && bytes_remain >= 2) {
+ // 's|'t|'m|'d
+ if (utf_char == "\'" && (utf_char_next == "s" || utf_char_next == "t" || utf_char_next == "m" || utf_char_next == "d")) {
+ split_condition = true;
+ }
+ if (split_condition) {
+ if (token.size()) {
+ bpe_words.emplace_back(token); // push previous content as token
+ }
+ token = utf_char + utf_char_next;
+ bpe_words.emplace_back(token);
+ token = "";
+ i++;
+ continue;
+ }
+ }
+ if (!split_condition && bytes_remain >= 3) {
+ // 're|'ve|'ll
+ if (utf_char == "\'" && (
+ (utf_char_next == "r" && utf_char_next_next == "e") ||
+ (utf_char_next == "v" && utf_char_next_next == "e") ||
+ (utf_char_next == "l" && utf_char_next_next == "l"))
+ ) {
+ split_condition = true;
+ }
+ if (split_condition) {
+ // current token + next token can be defined
+ if (token.size()) {
+ bpe_words.emplace_back(token); // push previous content as token
+ }
+ token = utf_char + utf_char_next + utf_char_next_next;
+ bpe_words.emplace_back(token); // the contraction
+ token = "";
+ i += 2;
+ continue;
+ }
+ }
+
+ if (!split_condition && !collecting) {
+ if (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER)) {
+ collecting_letter = true;
+ collecting = true;
+ }
+ else if (codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || (!token.size() && utf_char == " " && codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
+ collecting_numeric = true;
+ collecting = true;
+ }
+ else if (
+ ((codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) && (codepoint_type(utf_char) != CODEPOINT_TYPE_WHITESPACE)) ||
+ (!token.size() && utf_char == " " && codepoint_type(utf_char_next) != CODEPOINT_TYPE_LETTER && codepoint_type(utf_char_next) != CODEPOINT_TYPE_DIGIT && codepoint_type(utf_char_next) != CODEPOINT_TYPE_WHITESPACE)
+ ) {
+ collecting_special = true;
+ collecting = true;
+ }
+ else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE && codepoint_type(utf_char_next) == CODEPOINT_TYPE_WHITESPACE) {
+ collecting_whitespace_lookahead = true;
+ collecting = true;
+ }
+ else if (codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE) {
+ split_condition = true;
+ }
+ }
+ else if (!split_condition && collecting) {
+ if (collecting_letter && codepoint_type(utf_char) != CODEPOINT_TYPE_LETTER) {
+ split_condition = true;
+ }
+ else if (collecting_numeric && codepoint_type(utf_char) != CODEPOINT_TYPE_DIGIT) {
+ split_condition = true;
+ }
+ else if (collecting_special && (codepoint_type(utf_char) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char) == CODEPOINT_TYPE_DIGIT || codepoint_type(utf_char) == CODEPOINT_TYPE_WHITESPACE)) {
+ split_condition = true;
+ }
+ else if (collecting_whitespace_lookahead && (codepoint_type(utf_char_next) == CODEPOINT_TYPE_LETTER || codepoint_type(utf_char_next) == CODEPOINT_TYPE_DIGIT)) {
+ split_condition = true;
+ }
+ }
+
+ if (utf_char_next == "") {
+ split_condition = true; // final
+ token += utf_char;
+ }
- // ref: https://github.com/openai/gpt-2/blob/a74da5d99abaaba920de8131d64da2862a8f213b/src/encoder.py#L53
- const std::string pattern = R"('s|'t|'re|'ve|'m|'ll|'d| ?[[:alpha:]]+| ?[[:digit:]]+| ?[^\s[:alpha:][:digit:]]+|\s+(?!\S)|\s+)";
- const std::regex re(pattern);
+ if (split_condition) {
+ if (token.size()) {
+ bpe_words.emplace_back(token);
+ }
+ token = utf_char;
+ collecting = false;
+ collecting_letter = false;
+ collecting_numeric = false;
+ collecting_special = false;
+ collecting_whitespace_lookahead = false;
+ }
+ else {
+ token += utf_char;
+ }
+ }
- auto words_begin = std::sregex_iterator(text.begin(), text.end(), re);
- auto words_end = std::sregex_iterator();
- auto n_words = std::distance(words_begin, words_end);
- words.reserve(n_words);
- for (auto it = words_begin; it != words_end; ++it) {
- words.push_back(it->str());
+ for (std::string & word : bpe_words) {
+ std::string encoded_token = "";
+ for (char & c : word) {
+ encoded_token += bytes_to_unicode_bpe(c);
+ }
+ bpe_encoded_words.emplace_back(encoded_token);
}
- return words;
+ return bpe_encoded_words;
}
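+    // Rough illustration of the state machine above (not an exact trace): the input
+    // "I've 12 cats!" is split into "I", "'ve", " 12", " cats", "!" per the GPT-2 regex,
+    // and each piece is then byte-mapped, so " cats" becomes "Ġcats" in the returned vector.
+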
const llama_vocab & vocab;
llm_bigram_bpe::queue work_queue;
};
-static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos) {
+typedef enum FRAGMENT_BUFFER_VARIANT_TYPE {
+ FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN,
+ FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT
+} FRAGMENT_BUFFER_VARIANT_TYPE;
+
+struct fragment_buffer_variant {
+ fragment_buffer_variant(llama_vocab::id _token)
+ :
+ type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
+ token(_token),
+ raw_text(_dummy),
+ offset(0),
+ length(0){}
+ fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
+ :
+ type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
+ token((llama_vocab::id)-1),
+ raw_text(_raw_text),
+ offset(_offset),
+ length(_length){
+ GGML_ASSERT( _offset >= 0 );
+ GGML_ASSERT( _length >= 1 );
+ GGML_ASSERT( offset + length <= raw_text.length() );
+ }
+
+ const FRAGMENT_BUFFER_VARIANT_TYPE type;
+ const llama_vocab::id token;
+ const std::string _dummy;
+ const std::string & raw_text;
+ const uint64_t offset;
+ const uint64_t length;
+};
+
+// #define PRETOKENIZERDEBUG
+
+static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer) {
+ // for each special token
+ for (const auto & st: vocab.special_tokens_cache) {
+ const auto & special_token = st.first;
+ const auto & special_id = st.second;
+
+ // for each text fragment
+ std::forward_list<fragment_buffer_variant>::iterator it = buffer.begin();
+ while (it != buffer.end()) {
+ auto & fragment = (*it);
+
+ // if a fragment is text ( not yet processed )
+ if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
+ auto * raw_text = &(fragment.raw_text);
+
+ auto raw_text_base_offset = fragment.offset;
+ auto raw_text_base_length = fragment.length;
+
+ // loop over the text
+ while (true) {
+                    // find the first occurrence of a given special token in this fragment
+                    // passing the offset argument only limits the "search area", but match
+                    // coordinates are still relative to the full source raw_text
+ auto match = raw_text->find(special_token, raw_text_base_offset);
+
+                    // no occurrences found, stop processing this fragment for this special token
+ if (match == std::string::npos) break;
+
+ // check if match is within bounds of offset <-> length
+ if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
+
+#ifdef PRETOKENIZERDEBUG
+ fprintf(stderr, "FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
+#endif
+ auto source = std::distance(buffer.begin(), it);
+
+ // if match is further than base offset
+ // then we have some text to the left of it
+ if (match > raw_text_base_offset) {
+ // left
+ const int64_t left_reminder_offset = raw_text_base_offset + 0;
+ const int64_t left_reminder_length = match - raw_text_base_offset;
+ buffer.emplace_after(it, (*raw_text), left_reminder_offset, left_reminder_length);
+
+#ifdef PRETOKENIZERDEBUG
+ fprintf(stderr, "FL: (%ld %ld) '%s'\n", left_reminder_offset, left_reminder_length, raw_text->substr(left_reminder_offset, left_reminder_length).c_str());
+#endif
+ it++;
+ }
+
+ // special token
+ buffer.emplace_after(it, special_id);
+ it++;
+
+ // right
+ if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
+ const int64_t right_reminder_offset = match + special_token.length();
+ const int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
+ buffer.emplace_after(it, (*raw_text), right_reminder_offset, right_reminder_length);
+
+#ifdef PRETOKENIZERDEBUG
+ fprintf(stderr, "FR: (%ld %ld) '%s'\n", right_reminder_offset, right_reminder_length, raw_text->substr(right_reminder_offset, right_reminder_length).c_str());
+#endif
+
+ it++;
+
+ if (source == 0) {
+ buffer.erase_after(buffer.before_begin());
+ } else {
+ buffer.erase_after(std::next(buffer.begin(), (source-1)));
+ }
+
+ // repeat for the right side
+ raw_text_base_offset = right_reminder_offset;
+ raw_text_base_length = right_reminder_length;
+
+#ifdef PRETOKENIZERDEBUG
+ fprintf(stderr, "RR: (%ld %ld) '%s'\n", raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
+#endif
+ } else {
+ if (source == 0) {
+ buffer.erase_after(buffer.before_begin());
+ } else {
+ buffer.erase_after(std::next(buffer.begin(), (source-1)));
+ }
+ break;
+ }
+ }
+ }
+ it++;
+ }
+ }
+}
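+// e.g. with "<|endoftext|>" in vocab.special_tokens_cache, the single fragment
+// "Hello <|endoftext|> world" is partitioned into
+//     RAW_TEXT("Hello "), TOKEN(<|endoftext|>), RAW_TEXT(" world")
+// so the special token is emitted directly and never reaches the sub-tokenizers.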
+
+static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos, bool special) {
std::vector<llama_vocab::id> output;
// OG tokenizer behavior:
return output;
}
+ std::forward_list<fragment_buffer_variant> fragment_buffer;
+ fragment_buffer.emplace_front( raw_text, 0, raw_text.length() );
+
+ if (special) tokenizer_st_partition( vocab, fragment_buffer );
+
switch (vocab.type) {
case LLAMA_VOCAB_TYPE_SPM:
{
- // without adding this leading whitespace, we do not get the same results as the original tokenizer
- raw_text = " " + raw_text;
+ for (const auto & fragment: fragment_buffer)
+ {
+ if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT)
+ {
+ // without adding this leading whitespace, we do not get the same results as the original tokenizer
+
+ // TODO: It's likely possible to get rid of this string copy entirely
+ // by modifying llm_tokenizer_x to operate with string offsets like pre-tokenizer
+ // and passing 'add space prefix' as bool argument
+ //
+ auto raw_text = (special ? "" : " ") + fragment.raw_text.substr(fragment.offset, fragment.length);
- llm_tokenizer_spm tokenizer(vocab);
- llama_escape_whitespace(raw_text);
- tokenizer.tokenize(raw_text, output);
+#ifdef PRETOKENIZERDEBUG
+ fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
+#endif
+ llm_tokenizer_spm tokenizer(vocab);
+ llama_escape_whitespace(raw_text);
+ tokenizer.tokenize(raw_text, output);
+ }
+ else // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
+ {
+ output.push_back(fragment.token);
+ }
+ }
} break;
case LLAMA_VOCAB_TYPE_BPE:
{
- llm_tokenizer_bpe tokenizer(vocab);
- tokenizer.tokenize(raw_text, output);
+ for (const auto & fragment: fragment_buffer)
+ {
+ if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT)
+ {
+ auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
+
+#ifdef PRETOKENIZERDEBUG
+ fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
+#endif
+ llm_tokenizer_bpe tokenizer(vocab);
+ tokenizer.tokenize(raw_text, output);
+ }
+ else // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
+ {
+ output.push_back(fragment.token);
+ }
+ }
} break;
- };
+ }
return output;
}
// Decodes a UTF-8 string which may end in an incomplete sequence. Adds a terminating 0 for use as
// pointer. If an invalid sequence is encountered, returns `llama_partial_utf8.n_remain == -1`.
-std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
+static std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
const char * src,
llama_partial_utf8 partial_start) {
static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
std::vector<llama_grammar_candidate> rejects;
if (stack.empty()) {
- for (auto tok : candidates) {
+ for (const auto & tok : candidates) {
if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
rejects.push_back(tok);
}
const llama_grammar_element * stack_pos = stack.back();
std::vector<llama_grammar_candidate> next_candidates;
- for (auto tok : candidates) {
+ for (const auto & tok : candidates) {
if (*tok.code_points == 0) {
// reached end of full codepoints in token, reject iff it ended in a partial sequence
// that cannot satisfy this position in grammar
llama_grammar_advance_stack(rules, stack_after, next_stacks);
auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
- for (auto tok : next_rejects) {
+ for (const auto & tok : next_rejects) {
rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
}
// sampling
//
+void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
+ if (seed == LLAMA_DEFAULT_SEED) {
+ seed = time(NULL);
+ }
+ ctx->rng.seed(seed);
+}
+
void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
GGML_ASSERT(candidates->size > 0);
}
}
+void llama_sample_min_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
+ if (p <= 0.0f || !candidates->size) {
+ return;
+ }
+
+ llama_sample_softmax(ctx, candidates);
+
+ const int64_t t_start_sample_us = ggml_time_us();
+
+ float scale = candidates->data[0].p; // scale by max prob
+ size_t i = 1; // first token always matches
+
+ for (; i < candidates->size; ++i) {
+ if (candidates->data[i].p < p * scale && i >= min_keep) {
+ break; // prob too small
+ }
+ }
+
+ // Resize the output vector to keep only the matching tokens
+ candidates->size = i;
+
+ if (ctx) {
+ ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+ }
+}
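+// e.g. with p = 0.05 and a top candidate probability of 0.60, min-p keeps every token whose
+// probability is at least 0.60 * 0.05 = 0.03 (and never fewer than min_keep tokens).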
+
void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep) {
if (z >= 1.0f || candidates->size <= 2) {
return;
}
}
-void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
+void llama_sample_temp(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
const int64_t t_start_sample_us = ggml_time_us();
for (size_t i = 0; i < candidates_p->size; ++i) {
}
}
-void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty) {
- if (last_tokens_size == 0 || penalty == 1.0f) {
- return;
- }
-
- const int64_t t_start_sample_us = ggml_time_us();
-
- for (size_t i = 0; i < candidates->size; ++i) {
- const auto * token_iter = std::find(last_tokens, last_tokens + last_tokens_size, candidates->data[i].id);
- if (token_iter == last_tokens + last_tokens_size) {
- continue;
- }
-
- // The academic publication that described this technique actually just only divided, but that would cause tokens with negative logits to become more likely, which is obviously wrong.
- // This is common fix for this problem, which is to multiply by the penalty instead of dividing.
- if (candidates->data[i].logit <= 0) {
- candidates->data[i].logit *= penalty;
- } else {
- candidates->data[i].logit /= penalty;
- }
- }
-
- candidates->sorted = false;
-
- if (ctx) {
- ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
- }
+void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates_p, float temp) {
+ llama_sample_temp(ctx, candidates_p, temp);
}
-void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens_p, size_t last_tokens_size, float alpha_frequency, float alpha_presence) {
- if (last_tokens_size == 0 || (alpha_frequency == 0.0f && alpha_presence == 0.0f)) {
+void llama_sample_repetition_penalties(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ const llama_token * last_tokens,
+ size_t penalty_last_n,
+ float penalty_repeat,
+ float penalty_freq,
+ float penalty_present) {
+ if (penalty_last_n == 0 || (penalty_repeat == 1.0f && penalty_freq == 0.0f && penalty_present == 0.0f)) {
return;
}
// Create a frequency map to count occurrences of each token in last_tokens
std::unordered_map<llama_token, int> token_count;
- for (size_t i = 0; i < last_tokens_size; ++i) {
- token_count[last_tokens_p[i]]++;
+ for (size_t i = 0; i < penalty_last_n; ++i) {
+ token_count[last_tokens[i]]++;
}
// Apply frequency and presence penalties to the candidates
for (size_t i = 0; i < candidates->size; ++i) {
- auto token_iter = token_count.find(candidates->data[i].id);
+ const auto token_iter = token_count.find(candidates->data[i].id);
if (token_iter == token_count.end()) {
continue;
}
- int count = token_iter->second;
- candidates->data[i].logit -= float(count) * alpha_frequency + float(count > 0) * alpha_presence;
+ const int count = token_iter->second;
+
+        // The academic publication that described this technique only divided by the penalty,
+        // but that would make tokens with negative logits more likely, which is obviously wrong.
+        // The common fix is to multiply by the penalty instead of dividing.
+ if (candidates->data[i].logit <= 0) {
+ candidates->data[i].logit *= penalty_repeat;
+ } else {
+ candidates->data[i].logit /= penalty_repeat;
+ }
+
+ candidates->data[i].logit -= float(count) * penalty_freq + float(count > 0) * penalty_present;
}
candidates->sorted = false;
}
}
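+// e.g. a token seen twice in the penalized window with penalty_repeat = 1.1,
+// penalty_freq = 0.1 and penalty_present = 0.5 gets a positive logit divided by 1.1 and
+// then reduced by 2*0.1 + 1*0.5 = 0.7.
+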
- const llama_token eos = llama_token_eos(ctx);
+ const llama_token eos = llama_token_eos(&ctx->model);
std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
std::vector<llama_grammar_candidate> candidates_grammar;
for (size_t i = 0; i < candidates->size; ++i) {
const llama_token id = candidates->data[i].id;
- const std::string piece = llama_token_to_str(ctx, id);
+ const std::string piece = llama_token_to_piece(ctx, id);
if (id == eos) {
if (!allow_eos) {
candidates->data[i].logit = -INFINITY;
GGML_ASSERT(ctx);
- auto n_vocab = llama_n_vocab(ctx);
+ auto n_vocab = llama_n_vocab(llama_get_model(ctx));
GGML_ASSERT(n_vocab == (int)candidates->size);
GGML_ASSERT(!candidates->sorted);
llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu) {
GGML_ASSERT(ctx);
- auto N = float(llama_n_vocab(ctx));
+ auto N = float(llama_n_vocab(llama_get_model(ctx)));
int64_t t_start_sample_us;
t_start_sample_us = ggml_time_us();
void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
const int64_t t_start_sample_us = ggml_time_us();
- if (token == llama_token_eos(ctx)) {
+ if (token == llama_token_eos(&ctx->model)) {
for (const auto & stack : grammar->stacks) {
if (stack.empty()) {
return;
GGML_ASSERT(false);
}
- const std::string piece = llama_token_to_str(ctx, token);
+ const std::string piece = llama_token_to_piece(ctx, token);
// Note terminating 0 in decoded string
const auto decoded = decode_utf8(piece.c_str(), grammar->partial_utf8);
};
llama_logit_info(llama_context * ctx)
: logits(llama_get_logits(ctx))
- , n_vocab(llama_n_vocab(ctx))
+ , n_vocab(llama_n_vocab(llama_get_model(ctx)))
, max_l(*std::max_element(logits, logits + n_vocab))
, normalizer(1.0f / std::accumulate(logits, logits + n_vocab, 0.0f, sum_exp{max_l}))
{ }
size_t n_beams;
int n_past;
int n_predict;
- int n_threads;
std::vector<llama_beam> beams;
std::vector<llama_beam> next_beams;
// Used to communicate to/from callback on beams state.
std::vector<llama_beam_view> beam_views;
- llama_beam_search_data(llama_context * ctx, size_t n_beams, int n_past, int n_predict, int n_threads)
+ llama_beam_search_data(llama_context * ctx, size_t n_beams, int n_past, int n_predict)
: ctx(ctx)
, n_beams(n_beams)
, n_past(n_past)
, n_predict(n_predict)
- , n_threads(n_threads)
, beam_views(n_beams) {
beams.reserve(n_beams);
next_beams.reserve(n_beams);
} else {
// beam is not at end-of-sentence, so branch with next top_k tokens.
if (!beam.tokens.empty()) {
- llama_eval(ctx, beam.tokens.data(), beam.tokens.size(), n_past, n_threads);
+ llama_decode(ctx, llama_batch_get_one(beam.tokens.data(), beam.tokens.size(), n_past, 0));
}
llama_logit_info logit_info(ctx);
std::vector<llama_token_data> next_tokens = logit_info.top_k(n_beams);
callback(callback_data, get_beams_state(false)); // Sets common_prefix_length
update_beams_from_beam_views(); // Update values (p,eob) that callback may have changed.
if (common_prefix_length) {
- llama_eval(ctx, beams[0].tokens.data(), common_prefix_length, n_past, n_threads);
+ llama_decode(ctx, llama_batch_get_one(beams[0].tokens.data(), common_prefix_length, n_past, 0));
n_past += common_prefix_length;
}
// Zero-out next_beam probabilities to place them last in following min-heap.
void llama_beam_search(llama_context * ctx,
llama_beam_search_callback_fn_t callback, void * callback_data,
- size_t n_beams, int n_past, int n_predict, int n_threads) {
+ size_t n_beams, int n_past, int n_predict) {
assert(ctx);
const int64_t t_start_sample_us = ggml_time_us();
- llama_beam_search_data beam_search_data(ctx, n_beams, n_past, n_predict, n_threads);
+ llama_beam_search_data beam_search_data(ctx, n_beams, n_past, n_predict);
beam_search_data.loop(callback, callback_data);
no_init() { /* do nothing */ }
};
+struct quantize_state_internal {
+ const llama_model & model;
+ const llama_model_quantize_params * params;
+
+ int n_attention_wv = 0;
+ int n_feed_forward_w2 = 0;
+ int i_attention_wv = 0;
+ int i_feed_forward_w2 = 0;
+
+ int n_k_quantized = 0;
+ int n_fallback = 0;
+
+ quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
+ : model(model)
+ , params(params)
+ {}
+};
+
static void llama_convert_tensor_internal(
struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
const size_t nelements, const int nthread
workers.clear();
}
-#ifdef GGML_USE_K_QUANTS
static ggml_type get_k_quant_type(
- ggml_type new_type, const ggml_tensor * tensor, const llama_model & model, llama_ftype ftype, int * i_attention_wv,
- int n_attention_wv, int * i_feed_forward_w2, int n_feed_forward_w2
+ quantize_state_internal & qs,
+ ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype
) {
const std::string name = ggml_get_name(tensor);
// TODO: avoid hardcoded tensor names - use the TN_* constants
- const auto tn = LLM_TN(model.arch);
+ const llm_arch arch = qs.model.arch;
+ const auto tn = LLM_TN(arch);
auto use_more_bits = [](int i_layer, int num_layers) -> bool {
return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
if (name == tn(LLM_TENSOR_OUTPUT, "weight")) {
int nx = tensor->ne[0];
- if (model.arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
+ if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
new_type = GGML_TYPE_Q8_0;
}
else if (new_type != GGML_TYPE_Q8_0) {
} else if (name.find("attn_v.weight") != std::string::npos) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
- new_type = *i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
+ new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
- use_more_bits(*i_attention_wv, n_attention_wv)) new_type = GGML_TYPE_Q6_K;
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && *i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
+ use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
- (*i_attention_wv < n_attention_wv/8 || *i_attention_wv >= 7*n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
- if (model.type == MODEL_70B) {
+ (qs.i_attention_wv < qs.n_attention_wv/8 || qs.i_attention_wv >= 7*qs.n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
+ if (qs.model.type == MODEL_70B) {
// In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
// 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
// nearly negligible increase in model size by quantizing this tensor with more bits:
if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
}
- ++*i_attention_wv;
+ ++qs.i_attention_wv;
} else if (name.find("ffn_down.weight") != std::string::npos) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
- new_type = *i_feed_forward_w2 < 2 ? GGML_TYPE_Q5_K
- : model.arch != LLM_ARCH_FALCON || use_more_bits(*i_feed_forward_w2, n_feed_forward_w2) ? GGML_TYPE_Q4_K
+ new_type = qs.i_feed_forward_w2 < 2 ? GGML_TYPE_Q5_K
+ : arch != LLM_ARCH_FALCON || use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2) ? GGML_TYPE_Q4_K
: GGML_TYPE_Q3_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
- new_type = model.arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
+ new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
}
else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
- if (model.arch == LLM_ARCH_FALCON) {
- new_type = *i_feed_forward_w2 < 2 ? GGML_TYPE_Q6_K :
- use_more_bits(*i_feed_forward_w2, n_feed_forward_w2) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
+ if (arch == LLM_ARCH_FALCON) {
+ new_type = qs.i_feed_forward_w2 < 2 ? GGML_TYPE_Q6_K :
+ use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
} else {
- if (use_more_bits(*i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
+ if (use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
}
}
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(*i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
- else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && model.arch != LLM_ARCH_FALCON && *i_feed_forward_w2 < 4) {
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(qs.i_feed_forward_w2, qs.n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && qs.i_feed_forward_w2 < 4) {
new_type = GGML_TYPE_Q5_K;
}
- ++*i_feed_forward_w2;
+ ++qs.i_feed_forward_w2;
} else if (name.find("attn_output.weight") != std::string::npos) {
- if (model.arch != LLM_ARCH_FALCON) {
+ if (arch != LLM_ARCH_FALCON) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) new_type = GGML_TYPE_Q4_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
int nx = tensor->ne[0];
int ny = tensor->ne[1];
if (nx % QK_K != 0) {
- LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for k-quants\n", __func__, nx, ny, QK_K);
+ LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
convert_incompatible_tensor = true;
+ } else {
+ ++qs.n_k_quantized;
}
}
if (convert_incompatible_tensor) {
- if (name == tn(LLM_TENSOR_OUTPUT, "weight")) {
- new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing.
- LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n");
- } else if (name == tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
- new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing.
- LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n");
- } else {
- throw std::runtime_error("Unsupported tensor size encountered\n");
+ switch (new_type) {
+ case GGML_TYPE_Q2_K: new_type = GGML_TYPE_Q4_0; break;
+ case GGML_TYPE_Q3_K: new_type = GGML_TYPE_Q4_1; break;
+ case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
+ case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
+ case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
+ default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
}
+ LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
+ ++qs.n_fallback;
}
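+    // k-quants require row sizes divisible by QK_K (256 in the default build): e.g. a tensor
+    // with ne[0] = 1600 destined for GGML_TYPE_Q4_K falls back to GGML_TYPE_Q5_0 here.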
return new_type;
}
-#endif
static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
ggml_type quantized_type;
case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
-#ifdef GGML_USE_K_QUANTS
// K-quants
case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break;
case LLAMA_FTYPE_MOSTLY_Q3_K_S:
case LLAMA_FTYPE_MOSTLY_Q5_K_S:
case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break;
-#endif
+
default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
}
nthread = std::thread::hardware_concurrency();
}
- std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname_inp, /*use_mmap*/ false));
+    // mmap consistently increases speed on Linux, and also increases speed on Windows with a
+    // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
+#if defined(__linux__) || defined(_WIN32)
+ constexpr bool use_mmap = true;
+#else
+ constexpr bool use_mmap = false;
+#endif
+
+ llama_model_loader ml(fname_inp, use_mmap);
+ if (ml.use_mmap) {
+ ml.mapping.reset(new llama_mmap(&ml.file, /* prefetch */ 0, ggml_is_numa()));
+ }
llama_model model;
- llm_load_arch(*ml, model);
- llm_load_hparams(*ml, model, 0, 0, 0);
+ llm_load_arch(ml, model);
+ llm_load_hparams(ml, model);
+
+ struct quantize_state_internal qs(model, params);
if (params->only_copy) {
ftype = model.ftype;
struct gguf_context * ctx_out = gguf_init_empty();
// copy the KV pairs from the input file
- gguf_set_kv (ctx_out, ml->ctx_gguf);
+ gguf_set_kv (ctx_out, ml.ctx_gguf);
gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
gguf_set_val_u32(ctx_out, "general.file_type", ftype);
-#ifdef GGML_USE_K_QUANTS
- int n_attention_wv = 0;
- int n_feed_forward_w2 = 0;
-
- for (int i = 0; i < ml->n_tensors; ++i) {
- struct ggml_tensor * meta = ml->get_tensor_meta(i);
+ for (int i = 0; i < ml.n_tensors; ++i) {
+ struct ggml_tensor * meta = ml.get_tensor_meta(i);
const std::string name = ggml_get_name(meta);
// TODO: avoid hardcoded tensor names - use the TN_* constants
- if (name.find("attn_v.weight") != std::string::npos) {
- ++n_attention_wv;
+ if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
+ ++qs.n_attention_wv;
}
else if (name.find("ffn_down.weight") != std::string::npos) {
- ++n_feed_forward_w2;
+ ++qs.n_feed_forward_w2;
}
}
- if (n_attention_wv != n_feed_forward_w2 || (uint32_t)n_attention_wv != model.hparams.n_layer) {
+ if (qs.n_attention_wv != qs.n_feed_forward_w2 || (uint32_t)qs.n_attention_wv != model.hparams.n_layer) {
LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_feed_forward_w2 = %d, hparams.n_layer = %d\n",
- __func__, n_attention_wv, n_feed_forward_w2, model.hparams.n_layer);
+ __func__, qs.n_attention_wv, qs.n_feed_forward_w2, model.hparams.n_layer);
}
- int i_attention_wv = 0;
- int i_feed_forward_w2 = 0;
-#endif
-
size_t total_size_org = 0;
size_t total_size_new = 0;
std::vector<int64_t> hist_all(1 << 4, 0);
std::vector<no_init<float>> f32_conv_buf;
// populate the original tensors so we get an initial meta data
- for (int i = 0; i < ml->n_tensors; ++i) {
- struct ggml_tensor * meta = ml->get_tensor_meta(i);
+ for (int i = 0; i < ml.n_tensors; ++i) {
+ struct ggml_tensor * meta = ml.get_tensor_meta(i);
gguf_add_tensor(ctx_out, meta);
}
std::ofstream fout(fname_out, std::ios::binary);
+ fout.exceptions(std::ofstream::failbit); // fail fast on write errors
const size_t meta_size = gguf_get_meta_size(ctx_out);
// placeholder for the meta data
::zeros(fout, meta_size);
- for (int i = 0; i < ml->n_tensors; ++i) {
- struct ggml_tensor * tensor = ml->get_tensor_meta(i);
+ for (int i = 0; i < ml.n_tensors; ++i) {
+ struct ggml_tensor * tensor = ml.get_tensor_meta(i);
const std::string name = ggml_get_name(tensor);
- if (read_data.size() < ggml_nbytes(tensor)) {
- read_data.resize(ggml_nbytes(tensor));
+ if (!ml.use_mmap) {
+ if (read_data.size() < ggml_nbytes(tensor)) {
+ read_data.resize(ggml_nbytes(tensor));
+ }
+ tensor->data = read_data.data();
}
- tensor->data = read_data.data();
- ml->load_data_for(tensor);
+ ml.load_data_for(tensor);
LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
- ++idx, ml->n_tensors,
+ ++idx, ml.n_tensors,
ggml_get_name(tensor),
llama_format_tensor_shape(tensor).c_str(),
ggml_type_name(tensor->type));
if (quantize) {
new_type = quantized_type;
-#ifdef GGML_USE_K_QUANTS
- new_type = get_k_quant_type(
- new_type, tensor, model, ftype, &i_attention_wv, n_attention_wv, &i_feed_forward_w2, n_feed_forward_w2
- );
-#endif
+ if (!params->pure) {
+ new_type = get_k_quant_type(qs, new_type, tensor, ftype);
+ }
+
// If we've decided to quantize to the same type the tensor is already
// in then there's nothing to do.
quantize = tensor->type != new_type;
LLAMA_LOG_INFO("\n");
}
}
+
+ if (qs.n_fallback > 0) {
+ LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) incompatible with k-quants and required fallback quantization\n",
+ __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
+ }
}
-// TODO: after the GGUF PR, this likely won't work and needs to be updated
-int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) {
+static int llama_apply_lora_from_file_internal(
+ const struct llama_model & model, const char * path_lora, float scale, const char * path_base_model, int n_threads
+) {
LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
const int64_t t_start_lora_us = ggml_time_us();
int32_t lora_alpha;
fin.read((char *) &lora_r, sizeof(lora_r));
fin.read((char *) &lora_alpha, sizeof(lora_alpha));
- float scaling = (float)lora_alpha / (float)lora_r;
+ float scaling = scale * (float)lora_alpha / (float)lora_r;
LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
ggml_tensor * dest_t = model_tensors[base_name];
- offload_func_t offload_func = llama_nop;
- offload_func_t offload_func_force_inplace = llama_nop;
+ offload_func_t offload_func = ggml_offload_nop;
+ offload_func_t offload_func_force_inplace = ggml_offload_nop;
#ifdef GGML_USE_CUBLAS
if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) {
if (dest_t->type != GGML_TYPE_F16) {
throw std::runtime_error(format(
- "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models", __func__));
+ "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models. dest_t->type: %d", __func__, dest_t->type));
}
offload_func = ggml_cuda_assign_buffers;
offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace;
ggml_set_name(r, "r_cpy");
}
- struct ggml_cgraph gf = ggml_build_forward(r);
+ struct ggml_cgraph * gf = ggml_new_graph(lora_ctx);
+ ggml_build_forward_expand(gf, r);
- ggml_graph_compute_helper(work_buffer, &gf, n_threads);
+ ggml_graph_compute_helper(work_buffer, gf, n_threads);
// we won't need these tensors again, reset the context to save memory
ggml_free(lora_ctx);
//
// interface implementation
//
-
-struct llama_context_params llama_context_default_params() {
- struct llama_context_params result = {
- /*.seed =*/ LLAMA_DEFAULT_SEED,
- /*.n_ctx =*/ 512,
- /*.n_batch =*/ 512,
+struct llama_model_params llama_model_default_params() {
+ struct llama_model_params result = {
/*.n_gpu_layers =*/ 0,
/*.main_gpu =*/ 0,
/*.tensor_split =*/ nullptr,
- /*.rope_freq_base =*/ 10000.0f,
- /*.rope_freq_scale =*/ 1.0f,
/*.progress_callback =*/ nullptr,
/*.progress_callback_user_data =*/ nullptr,
- /*.low_vram =*/ false,
- /*.mul_mat_q =*/ true,
- /*.f16_kv =*/ true,
- /*.logits_all =*/ false,
/*.vocab_only =*/ false,
/*.use_mmap =*/ true,
/*.use_mlock =*/ false,
- /*.embedding =*/ false,
};
#ifdef GGML_USE_METAL
return result;
}
+struct llama_context_params llama_context_default_params() {
+ struct llama_context_params result = {
+ /*.seed =*/ LLAMA_DEFAULT_SEED,
+ /*.n_ctx =*/ 512,
+ /*.n_batch =*/ 512,
+ /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
+ /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
+ /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_UNSPECIFIED,
+ /*.rope_freq_base =*/ 0.0f,
+ /*.rope_freq_scale =*/ 0.0f,
+ /*.yarn_ext_factor =*/ -1.0f,
+ /*.yarn_attn_factor =*/ 1.0f,
+ /*.yarn_beta_fast =*/ 32.0f,
+ /*.yarn_beta_slow =*/ 1.0f,
+ /*.yarn_orig_ctx =*/ 0,
+ /*.mul_mat_q =*/ true,
+ /*.f16_kv =*/ true,
+ /*.logits_all =*/ false,
+ /*.embedding =*/ false,
+ };
+
+ return result;
+}
+
struct llama_model_quantize_params llama_model_quantize_default_params() {
struct llama_model_quantize_params result = {
/*.nthread =*/ 0,
/*.allow_requantize =*/ false,
/*.quantize_output_tensor =*/ true,
/*.only_copy =*/ false,
+ /*.pure =*/ false,
};
return result;
struct llama_model * llama_load_model_from_file(
const char * path_model,
- struct llama_context_params params) {
+ struct llama_model_params params) {
ggml_time_init();
llama_model * model = new llama_model;
- ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
-
unsigned cur_percentage = 0;
if (params.progress_callback == NULL) {
params.progress_callback_user_data = &cur_percentage;
};
}
- if (!llama_model_load(path_model, *model, params.n_ctx, params.n_batch, params.n_gpu_layers,
- params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale,
- params.low_vram, memory_type, params.use_mmap, params.use_mlock, params.vocab_only,
- params.progress_callback, params.progress_callback_user_data)) {
+ if (!llama_model_load(path_model, *model, params)) {
LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
delete model;
return nullptr;
llama_context * ctx = new llama_context(*model);
+ const auto & hparams = model->hparams;
+ auto & cparams = ctx->cparams;
+
+ cparams.n_batch = params.n_batch;
+ cparams.n_threads = params.n_threads;
+ cparams.n_threads_batch = params.n_threads_batch;
+ cparams.yarn_ext_factor = params.yarn_ext_factor;
+ cparams.yarn_attn_factor = params.yarn_attn_factor;
+ cparams.yarn_beta_fast = params.yarn_beta_fast;
+ cparams.yarn_beta_slow = params.yarn_beta_slow;
+ cparams.mul_mat_q = params.mul_mat_q;
+
+ cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx;
+ cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
+ cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
+
+ cparams.n_yarn_orig_ctx = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx :
+ hparams.n_yarn_orig_ctx != 0 ? hparams.n_yarn_orig_ctx :
+ hparams.n_ctx_train;
+
+ auto rope_scaling_type = params.rope_scaling_type;
+ if (rope_scaling_type == LLAMA_ROPE_SCALING_UNSPECIFIED) {
+ rope_scaling_type = hparams.rope_scaling_type_train;
+ }
+
+ if (rope_scaling_type == LLAMA_ROPE_SCALING_NONE) {
+ cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
+ }
+
+ if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
+ cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_YARN ? 1.0f : 0.0f;
+ }
+
if (params.seed == LLAMA_DEFAULT_SEED) {
params.seed = time(NULL);
}
+ LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
+ LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
+ LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
+
ctx->rng = std::mt19937(params.seed);
ctx->logits_all = params.logits_all;
ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
// reserve memory for context buffers
- if (!params.vocab_only) {
- if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) {
+ if (!hparams.vocab_only) {
+ if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, cparams.n_ctx, model->n_gpu_layers)) {
LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
llama_free(ctx);
return nullptr;
LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
}
- const auto & hparams = ctx->model.hparams;
-
// resized during inference
if (params.logits_all) {
- ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
+ ctx->logits.reserve(cparams.n_ctx*hparams.n_vocab);
} else {
ctx->logits.reserve(hparams.n_vocab);
}
{
static const size_t tensor_alignment = 32;
// the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data
- ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead());
+ ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead());
// create measure allocator
ctx->alloc = ggml_allocr_new_measure(tensor_alignment);
// build worst-case graph
- int n_tokens = std::min((int)hparams.n_ctx, params.n_batch);
- int n_past = hparams.n_ctx - n_tokens;
- llama_token token = llama_token_bos(ctx); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
- ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past);
+ int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_batch);
+ int n_past = cparams.n_ctx - n_tokens;
+ llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between the token and embedding input graphs
+ ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0));
+
#ifdef GGML_USE_METAL
- if (params.n_gpu_layers > 0) {
+ if (model->n_gpu_layers > 0) {
+ ggml_metal_log_set_callback(llama_log_callback_default, NULL);
+
ctx->ctx_metal = ggml_metal_init(1);
if (!ctx->ctx_metal) {
LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
llama_free(ctx);
return NULL;
}
- ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false);
- ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
+ //ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false);
+ //ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
}
#endif
// measure memory requirements for the graph
size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;
- LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);
+ LLAMA_LOG_INFO("%s: compute buffer total size = %.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);
// recreate allocator with exact memory requirements
ggml_allocr_free(ctx->alloc);
ctx->alloc = ggml_allocr_new(ctx->buf_alloc.data, ctx->buf_alloc.size, tensor_alignment);
#ifdef GGML_USE_METAL
if (ctx->ctx_metal) {
- ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
+ //ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
}
#endif
#ifdef GGML_USE_CUBLAS
- if (params.low_vram) {
- LLAMA_LOG_INFO("%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
- ggml_cuda_set_scratch_size(0); // disable scratch
- } else {
- ggml_cuda_set_scratch_size(alloc_size);
- LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MB\n", __func__, alloc_size / 1024.0 / 1024.0);
+ ggml_cuda_set_scratch_size(alloc_size);
+ LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MB\n", __func__, alloc_size / 1024.0 / 1024.0);
+
+ // calculate total VRAM usage
+ auto add_tensor = [](const ggml_tensor * t, size_t & size) {
+ if (t->backend == GGML_BACKEND_GPU || t->backend == GGML_BACKEND_GPU_SPLIT) {
+ size += ggml_nbytes(t);
+ }
+ };
+ size_t model_vram_size = 0;
+ for (const auto & kv : model->tensors_by_name) {
+ add_tensor(kv.second, model_vram_size);
}
+
+ size_t kv_vram_size = 0;
+ add_tensor(ctx->kv_self.k, kv_vram_size);
+ add_tensor(ctx->kv_self.v, kv_vram_size);
+
+ size_t ctx_vram_size = alloc_size + kv_vram_size;
+ size_t total_vram_size = model_vram_size + ctx_vram_size;
+
+ LLAMA_LOG_INFO("%s: total VRAM used: %.2f MB (model: %.2f MB, context: %.2f MB)\n", __func__,
+ total_vram_size / 1024.0 / 1024.0,
+ model_vram_size / 1024.0 / 1024.0,
+ ctx_vram_size / 1024.0 / 1024.0);
#endif
}
#ifdef GGML_USE_METAL
- if (params.n_gpu_layers > 0) {
+ if (model->n_gpu_layers > 0) {
// this allocates all Metal resources and memory buffers
void * data_ptr = NULL;
size_t data_size = 0;
- if (params.use_mmap) {
+ if (ctx->model.mapping) {
data_ptr = ctx->model.mapping->addr;
data_size = ctx->model.mapping->size;
} else {
return NULL; \
}
- LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
-
- LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.data, ctx->buf_compute.size, 0));
- LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0));
-
+ LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
+ LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0));
LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", ctx->buf_alloc.data, ctx->buf_alloc.size, 0));
#undef LLAMA_METAL_CHECK_BUF
}
if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
// Enter a blocking eval loop with dummy input, letting rank=0 drive the process
- const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
- while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
+ // TODO: needs fix after #3228
+ GGML_ASSERT(false && "not implemented");
+ //const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
+ //while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
llama_backend_free();
exit(1);
}
return ctx;
}
-struct llama_context * llama_init_from_file(
- const char * path_model,
- struct llama_context_params params) {
- struct llama_model * model = llama_load_model_from_file(path_model, params);
- if (!model) {
- return nullptr;
- }
-
- struct llama_context * ctx = llama_new_context_with_model(model, params);
- ctx->model_owner = true;
-
- return ctx;
-}
-
void llama_free(struct llama_context * ctx) {
delete ctx;
}
-int llama_n_vocab(const struct llama_context * ctx) {
- return llama_model_n_vocab(&ctx->model);
+const llama_model * llama_get_model(const struct llama_context * ctx) {
+ return &ctx->model;
}
int llama_n_ctx(const struct llama_context * ctx) {
- return llama_model_n_ctx(&ctx->model);
+ return ctx->cparams.n_ctx;
}
-int llama_n_ctx_train(const struct llama_context * ctx) {
- return llama_model_n_ctx_train(&ctx->model);
+enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
+ return model->vocab.type;
}
-int llama_n_embd(const struct llama_context * ctx) {
- return llama_model_n_embd(&ctx->model);
-}
-
-enum llama_vocab_type llama_vocab_type(const struct llama_context * ctx) {
- return ctx->model.vocab.type;
-}
-
-int llama_model_n_vocab(const struct llama_model * model) {
+int llama_n_vocab(const struct llama_model * model) {
return model->vocab.id_to_token.size();
}
-int llama_model_n_ctx(const struct llama_model * model) {
- return model->hparams.n_ctx;
-}
-
-int llama_model_n_ctx_train(const struct llama_model * model) {
+int llama_n_ctx_train(const struct llama_model * model) {
return model->hparams.n_ctx_train;
}
-int llama_model_n_embd(const struct llama_model * model) {
+int llama_n_embd(const struct llama_model * model) {
return model->hparams.n_embd;
}
+float llama_rope_freq_scale_train(const struct llama_model * model) {
+ return model->hparams.rope_freq_scale_train;
+}
+
int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
return snprintf(buf, buf_size, "%s %s %s",
- model->name.c_str(),
+ llama_model_arch_name(model->arch).c_str(),
llama_model_type_name(model->type),
llama_model_ftype_name(model->ftype).c_str());
}
return nparams;
}
+struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name) {
+ return ggml_get_tensor(model->ctx, name);
+}
+
int llama_model_quantize(
const char * fname_inp,
const char * fname_out,
}
}
-int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) {
+int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, float scale, const char * path_base_model, int n_threads) {
try {
- return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads);
+ return llama_apply_lora_from_file_internal(ctx->model, path_lora, scale, path_base_model, n_threads);
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
return 1;
}
}
-int llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, const char * path_base_model, int n_threads) {
+int llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, float scale, const char * path_base_model, int n_threads) {
try {
- return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads);
+ return llama_apply_lora_from_file_internal(*model, path_lora, scale, path_base_model, n_threads);
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
return 1;
}
int llama_get_kv_cache_token_count(const struct llama_context * ctx) {
- return ctx->kv_self.n;
+ return ctx->kv_self.head;
}
-#define LLAMA_MAX_RNG_STATE (64*1024)
+void llama_kv_cache_clear(struct llama_context * ctx) {
+ llama_kv_cache_clear(ctx->kv_self);
+}
-void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
- if (seed == LLAMA_DEFAULT_SEED) {
- seed = time(NULL);
+void llama_kv_cache_seq_rm(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
+ llama_kv_cache_seq_rm(ctx->kv_self, seq_id, p0, p1);
+}
+
+void llama_kv_cache_seq_cp(struct llama_context * ctx, llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
+ if (seq_id_src == seq_id_dst) {
+ return;
}
- ctx->rng.seed(seed);
+ llama_kv_cache_seq_cp(ctx->kv_self, seq_id_src, seq_id_dst, p0, p1);
+}
+
+void llama_kv_cache_seq_keep(struct llama_context * ctx, llama_seq_id seq_id) {
+ llama_kv_cache_seq_keep(ctx->kv_self, seq_id);
+}
+
+void llama_kv_cache_seq_shift(struct llama_context * ctx, llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos delta) {
+ llama_kv_cache_seq_shift(ctx->kv_self, seq_id, p0, p1, delta);
}
// Returns the *maximum* size of the state
* llama_copy_state_data(ctx, &data_ctx);
*
*/
-void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
+static void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
// copy rng
{
std::stringstream rng_ss;
{
const auto & kv_self = ctx->kv_self;
const auto & hparams = ctx->model.hparams;
- const int n_layer = hparams.n_layer;
- const int n_embd = hparams.n_embd_gqa();
- const int n_ctx = hparams.n_ctx;
+ const auto & cparams = ctx->cparams;
- const size_t kv_size = kv_self.buf.size;
- const int kv_ntok = llama_get_kv_cache_token_count(ctx);
+ const auto n_layer = hparams.n_layer;
+ const auto n_embd = hparams.n_embd_gqa();
+ const auto n_ctx = cparams.n_ctx;
- data_ctx->write(&kv_size, sizeof(kv_size));
- data_ctx->write(&kv_ntok, sizeof(kv_ntok));
+ const size_t kv_buf_size = kv_self.buf.size;
+ const uint32_t kv_head = kv_self.head;
+ const uint32_t kv_size = kv_self.size;
- if (kv_size) {
+ data_ctx->write(&kv_buf_size, sizeof(kv_buf_size));
+ data_ctx->write(&kv_head, sizeof(kv_head));
+ data_ctx->write(&kv_size, sizeof(kv_size));
+
+ if (kv_buf_size) {
const size_t elt_size = ggml_element_size(kv_self.k);
- ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
- ggml_cgraph gf{};
+ ggml_context * cpy_ctx = ggml_init({ 6*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
+ ggml_cgraph * gf = ggml_new_graph(cpy_ctx);
- ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
+ ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer);
std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0);
kout3d->data = kout3d_data.data();
- ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
+ ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_head, n_embd, n_layer);
std::vector<uint8_t> vout3d_data(ggml_nbytes(vout3d), 0);
vout3d->data = vout3d_data.data();
ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
- n_embd, kv_ntok, n_layer,
+ n_embd, kv_head, n_layer,
elt_size*n_embd, elt_size*n_embd*n_ctx, 0);
ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
- kv_ntok, n_embd, n_layer,
+ kv_head, n_embd, n_layer,
elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);
- ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d));
- ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, v3d, vout3d));
- ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);
+ ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, k3d, kout3d));
+ ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, v3d, vout3d));
+ ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1);
ggml_free(cpy_ctx);
data_ctx->write(kout3d_data.data(), kout3d_data.size());
data_ctx->write(vout3d_data.data(), vout3d_data.size());
}
+
+ for (uint32_t i = 0; i < kv_size; ++i) {
+ const auto & cell = kv_self.cells[i];
+
+ const llama_pos pos = cell.pos;
+ const size_t seq_id_size = cell.seq_id.size();
+
+ data_ctx->write(&pos, sizeof(pos));
+ data_ctx->write(&seq_id_size, sizeof(seq_id_size));
+
+ for (auto seq_id : cell.seq_id) {
+ data_ctx->write(&seq_id, sizeof(seq_id));
+ }
+ }
}
}
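For reference, the KV portion of the state written above has the following serialized layout (a sketch derived directly from the writes in this function):

// size_t   kv_buf_size;   // bytes of k/v tensor data that follow
// uint32_t kv_head;       // number of used cells (rows copied per layer)
// uint32_t kv_size;       // total number of cells
// <k data> <v data>       // present only if kv_buf_size != 0
// then, repeated kv_size times:
//   llama_pos pos;        // position stored in the cell
//   size_t    n;          // number of sequence ids in the cell
//   llama_seq_id ids[n];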
{
const auto & kv_self = ctx->kv_self;
const auto & hparams = ctx->model.hparams;
+ const auto & cparams = ctx->cparams;
+
const int n_layer = hparams.n_layer;
const int n_embd = hparams.n_embd_gqa();
- const int n_ctx = hparams.n_ctx;
+ const int n_ctx = cparams.n_ctx;
- size_t kv_size;
- int kv_ntok;
+ size_t kv_buf_size;
+ uint32_t kv_head;
+ uint32_t kv_size;
- memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size);
- memcpy(&kv_ntok, inp, sizeof(kv_ntok)); inp += sizeof(kv_ntok);
+ memcpy(&kv_buf_size, inp, sizeof(kv_buf_size)); inp += sizeof(kv_buf_size);
+ memcpy(&kv_head, inp, sizeof(kv_head)); inp += sizeof(kv_head);
+ memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size);
- if (kv_size) {
- GGML_ASSERT(kv_self.buf.size == kv_size);
+ if (kv_buf_size) {
+ GGML_ASSERT(kv_self.buf.size == kv_buf_size);
const size_t elt_size = ggml_element_size(kv_self.k);
- ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
- ggml_cgraph gf{};
+ ggml_context * cpy_ctx = ggml_init({ 6*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
+ ggml_cgraph * gf = ggml_new_graph(cpy_ctx);
- ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
+ ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer);
kin3d->data = (void *) inp;
inp += ggml_nbytes(kin3d);
- ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
+ ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_head, n_embd, n_layer);
vin3d->data = (void *) inp;
inp += ggml_nbytes(vin3d);
ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
- n_embd, kv_ntok, n_layer,
+ n_embd, kv_head, n_layer,
elt_size*n_embd, elt_size*n_embd*n_ctx, 0);
ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
- kv_ntok, n_embd, n_layer,
+ kv_head, n_embd, n_layer,
elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);
- ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, kin3d, k3d));
- ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, vin3d, v3d));
- ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);
+ ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, kin3d, k3d));
+ ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, vin3d, v3d));
+ ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1);
ggml_free(cpy_ctx);
}
- ctx->kv_self.n = kv_ntok;
+ ctx->kv_self.head = kv_head;
+ ctx->kv_self.size = kv_size;
+
+ ctx->kv_self.cells.resize(kv_size);
+
+ for (uint32_t i = 0; i < kv_size; ++i) {
+ llama_pos pos;
+ size_t seq_id_size;
+
+ memcpy(&pos, inp, sizeof(pos)); inp += sizeof(pos);
+ memcpy(&seq_id_size, inp, sizeof(seq_id_size)); inp += sizeof(seq_id_size);
+
+ ctx->kv_self.cells[i].pos = pos;
+
+ llama_seq_id seq_id;
+
+ for (size_t j = 0; j < seq_id_size; ++j) {
+ memcpy(&seq_id, inp, sizeof(seq_id)); inp += sizeof(seq_id);
+ ctx->kv_self.cells[i].seq_id.insert(seq_id);
+ }
+ }
}
const size_t nread = inp - src;
int llama_eval(
struct llama_context * ctx,
- const llama_token * tokens,
- int n_tokens,
- int n_past,
- int n_threads) {
- if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
- LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
- return 1;
- }
+ llama_token * tokens,
+ int32_t n_tokens,
+ int n_past) {
+ llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);
- // get a more accurate load time, upon first eval
- // TODO: fix this
- if (!ctx->has_evaluated_once) {
- ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
- ctx->has_evaluated_once = true;
+ const int ret = llama_decode_internal(*ctx, llama_batch_get_one(tokens, n_tokens, n_past, 0));
+ if (ret < 0) {
+ LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
}
- return 0;
+ return ret;
}
int llama_eval_embd(
struct llama_context * ctx,
- const float * embd,
- int n_tokens,
- int n_past,
- int n_threads) {
- if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
- LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
- return 1;
- }
+ float * embd,
+ int32_t n_tokens,
+ int n_past) {
+ llama_kv_cache_seq_rm(ctx->kv_self, -1, n_past, -1);
- // get a more accurate load time, upon first eval
- // TODO: fix this
- if (!ctx->has_evaluated_once) {
- ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
- ctx->has_evaluated_once = true;
+ llama_batch batch = { n_tokens, nullptr, embd, nullptr, nullptr, nullptr, nullptr, n_past, 1, 0, };
+
+ const int ret = llama_decode_internal(*ctx, batch);
+ if (ret < 0) {
+ LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
}
- return 0;
+ return ret;
+}
+
+void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch) {
+ ctx->cparams.n_threads = n_threads;
+ ctx->cparams.n_threads_batch = n_threads_batch;
+}
+
+struct llama_batch llama_batch_get_one(
+ llama_token * tokens,
+ int32_t n_tokens,
+ llama_pos pos_0,
+ llama_seq_id seq_id) {
+ return {
+ /*n_tokens =*/ n_tokens,
+ /*tokens =*/ tokens,
+ /*embd =*/ nullptr,
+ /*pos =*/ nullptr,
+ /*n_seq_id =*/ nullptr,
+ /*seq_id =*/ nullptr,
+ /*logits =*/ nullptr,
+ /*all_pos_0 =*/ pos_0,
+ /*all_pos_1 =*/ 1,
+ /*all_seq_id =*/ seq_id,
+ };
}
-int llama_eval_export(struct llama_context * ctx, const char * fname) {
- const int n_batch = 1;
- const int n_ctx = 512 - n_batch;
+struct llama_batch llama_batch_init(int32_t n_tokens, int32_t embd, int32_t n_seq_max) {
+ llama_batch batch = { 0, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, 0, 0, 0, };
- const std::vector<llama_token> tmp(n_batch, llama_token_bos(ctx));
+ if (embd) {
+ batch.embd = (float *) malloc(sizeof(float) * n_tokens * embd);
+ } else {
+ batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens);
+ }
- if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
- LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
- return 1;
+ batch.pos = (llama_pos *) malloc(sizeof(llama_pos) * n_tokens);
+ batch.n_seq_id = (int32_t *) malloc(sizeof(int32_t) * n_tokens);
+ batch.seq_id = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * n_tokens);
+ for (int i = 0; i < n_tokens; ++i) {
+ batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
}
+ batch.logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens);
- return 0;
+ return batch;
+}
+
+void llama_batch_free(struct llama_batch batch) {
+ if (batch.token) free(batch.token);
+ if (batch.embd) free(batch.embd);
+ if (batch.pos) free(batch.pos);
+ if (batch.n_seq_id) free(batch.n_seq_id);
+ if (batch.seq_id) {
+ for (int i = 0; i < batch.n_tokens; ++i) {
+ free(batch.seq_id[i]);
+ }
+ free(batch.seq_id);
+ }
+ if (batch.logits) free(batch.logits);
+}
+
+int llama_decode(
+ struct llama_context * ctx,
+ struct llama_batch batch) {
+ const int ret = llama_decode_internal(*ctx, batch);
+ if (ret < 0) {
+ LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
+ }
+
+ return ret;
}
float * llama_get_logits(struct llama_context * ctx) {
return ctx->logits.data();
}
+float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
+ return ctx->logits.data() + i*ctx->model.hparams.n_vocab;
+}
+
float * llama_get_embeddings(struct llama_context * ctx) {
return ctx->embedding.data();
}
-const char * llama_token_get_text(const struct llama_context * ctx, llama_token token) {
- return ctx->model.vocab.id_to_token[token].text.c_str();
+const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
+ return model->vocab.id_to_token[token].text.c_str();
}
-float llama_token_get_score(const struct llama_context * ctx, llama_token token) {
- return ctx->model.vocab.id_to_token[token].score;
+float llama_token_get_score(const struct llama_model * model, llama_token token) {
+ return model->vocab.id_to_token[token].score;
}
-llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token) {
- return ctx->model.vocab.id_to_token[token].type;
+llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token) {
+ return model->vocab.id_to_token[token].type;
}
-llama_token llama_token_bos(const struct llama_context * ctx) {
- return ctx->model.vocab.special_bos_id;
+llama_token llama_token_bos(const struct llama_model * model) {
+ return model->vocab.special_bos_id;
}
-llama_token llama_token_eos(const struct llama_context * ctx) {
- return ctx->model.vocab.special_eos_id;
+llama_token llama_token_eos(const struct llama_model * model) {
+ return model->vocab.special_eos_id;
}
-llama_token llama_token_nl(const struct llama_context * ctx) {
- return ctx->model.vocab.linefeed_id;
+llama_token llama_token_nl(const struct llama_model * model) {
+ return model->vocab.linefeed_id;
}
-int llama_tokenize(
- struct llama_context * ctx,
- const char * text,
- llama_token * tokens,
- int n_max_tokens,
- bool add_bos) {
- return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos);
+llama_token llama_token_prefix(const struct llama_model * model) {
+ return model->vocab.special_prefix_id;
+}
+
+llama_token llama_token_middle(const struct llama_model * model) {
+ return model->vocab.special_middle_id;
}
-int llama_tokenize_with_model(
+llama_token llama_token_suffix(const struct llama_model * model) {
+ return model->vocab.special_suffix_id;
+}
+
+llama_token llama_token_eot(const struct llama_model * model) {
+ return model->vocab.special_eot_id;
+}
+
+int llama_tokenize(
const struct llama_model * model,
const char * text,
+ int text_len,
llama_token * tokens,
int n_max_tokens,
- bool add_bos) {
- auto res = llama_tokenize_internal(model->vocab, text, add_bos);
+ bool add_bos,
+ bool special) {
+ auto res = llama_tokenize_internal(model->vocab, std::string(text, text_len), add_bos, special);
if (n_max_tokens < (int) res.size()) {
// LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
return -((int) res.size());
}
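A minimal sketch of the intended calling convention, assuming `model`, `text` and `text_len` as in the signature above, and that the function otherwise writes the tokens and returns their count (initial buffer size is illustrative):

// Sketch: two-pass tokenization - on overflow the negative return value is
// the required token count, so resize and retry.
std::vector<llama_token> tokens(32);
int n = llama_tokenize(model, text, text_len, tokens.data(), (int) tokens.size(),
                       /*add_bos*/ true, /*special*/ false);
if (n < 0) {
    tokens.resize(-n);
    n = llama_tokenize(model, text, text_len, tokens.data(), (int) tokens.size(),
                       /*add_bos*/ true, /*special*/ false);
}
tokens.resize(n);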
-int llama_token_to_piece(const struct llama_context * ctx, llama_token token, char * buf, int length) {
- return llama_token_to_piece_with_model(&ctx->model, token, buf, length);
+static std::string llama_decode_text(const std::string & text) {
+ std::string decoded_text;
+ auto unicode_sequences = codepoints_from_utf8(text);
+ for (auto& unicode_sequence : unicode_sequences) {
+ decoded_text += unicode_to_bytes_bpe(codepoint_to_utf8(unicode_sequence));
+ }
+
+ return decoded_text;
}
// does not write null-terminator to buf
-int llama_token_to_piece_with_model(const struct llama_model * model, llama_token token, char * buf, int length) {
- if (0 <= token && token < llama_model_n_vocab(model)) {
- if (llama_is_normal_token(model->vocab, token)) {
- std::string result = model->vocab.id_to_token[token].text;
- if (llama_vocab_get_type(model->vocab) == LLAMA_VOCAB_TYPE_SPM) {
+int llama_token_to_piece(const struct llama_model * model, llama_token token, char * buf, int length) {
+ if (0 <= token && token < llama_n_vocab(model)) {
+ switch (llama_vocab_get_type(model->vocab)) {
+ case LLAMA_VOCAB_TYPE_SPM: {
+ if (llama_is_normal_token(model->vocab, token)) {
+ std::string result = model->vocab.id_to_token[token].text;
llama_unescape_whitespace(result);
+ if (length < (int) result.length()) {
+ return -result.length();
+ }
+ memcpy(buf, result.c_str(), result.length());
+ return result.length();
+ } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
+ if (length < 3) {
+ return -3;
+ }
+ memcpy(buf, "\xe2\x96\x85", 3);
+ return 3;
+ } else if (llama_is_control_token(model->vocab, token)) {
+ ;
+ } else if (llama_is_byte_token(model->vocab, token)) {
+ if (length < 1) {
+ return -1;
+ }
+ buf[0] = llama_token_to_byte(model->vocab, token);
+ return 1;
+ } else {
+ // TODO: for now we accept all unsupported token types,
+ // suppressing them like CONTROL tokens.
+ // GGML_ASSERT(false);
}
- if (length < (int) result.length()) {
- return -result.length();
- }
- memcpy(buf, result.c_str(), result.length());
- return result.length();
- } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
- if (length < 3) {
- return -3;
- }
- buf[0] = '\xe2';
- buf[1] = '\x96';
- buf[2] = '\x85';
- return 3;
- } else if (llama_is_control_token(model->vocab, token)) {
- ;
- } else if (llama_is_byte_token(model->vocab, token)) {
- if (length < 1) {
- return -1;
+ break;
+ }
+ case LLAMA_VOCAB_TYPE_BPE: {
+ if (llama_is_normal_token(model->vocab, token)) {
+ std::string result = model->vocab.id_to_token[token].text;
+ result = llama_decode_text(result);
+ if (length < (int) result.length()) {
+ return -result.length();
+ }
+ memcpy(buf, result.c_str(), result.length());
+ return result.length();
+ } else if (llama_is_control_token(model->vocab, token)) {
+ ;
+ } else {
+ // TODO: for now we accept all unsupported token types,
+ // suppressing them like CONTROL tokens.
+ // GGML_ASSERT(false);
}
- buf[0] = llama_token_to_byte(model->vocab, token);
- return 1;
+ break;
+ }
+ default:
+ GGML_ASSERT(false);
}
}
return 0;
const llama_timings timings = llama_get_timings(ctx);
LLAMA_LOG_INFO("\n");
- LLAMA_LOG_INFO("%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
- LLAMA_LOG_INFO("%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+ LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, timings.t_load_ms);
+ LLAMA_LOG_INFO("%s: sample time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
__func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
- LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
+ LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
__func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
- LLAMA_LOG_INFO("%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+ LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
__func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
- LLAMA_LOG_INFO("%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
+ LLAMA_LOG_INFO("%s: total time = %10.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
}
void llama_reset_timings(struct llama_context * ctx) {
}
// For internal test use
-const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) {
+const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
+ struct llama_context * ctx
+) {
return ctx->model.tensors_by_name;
}
-void llama_log_set(llama_log_callback log_callback, void * user_data) {
+void llama_log_set(ggml_log_callback log_callback, void * user_data) {
g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
g_state.log_callback_user_data = user_data;
}
-static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) {
+static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
va_list args_copy;
va_copy(args_copy, args);
char buffer[128];
va_end(args_copy);
}
-static void llama_log_internal(llama_log_level level, const char * format, ...) {
+static void llama_log_internal(ggml_log_level level, const char * format, ...) {
va_list args;
va_start(args, format);
llama_log_internal_v(level, format, args);
va_end(args);
}
-static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) {
+static void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
(void) level;
(void) user_data;
fputs(text, stderr);
#define LLAMA_DEFAULT_SEED 0xFFFFFFFF
+#define LLAMA_MAX_RNG_STATE (64*1024)
+
#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
#define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
-#define LLAMA_SESSION_VERSION 1
+#define LLAMA_SESSION_VERSION 2
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL)
// Defined when llama.cpp is compiled with support for offloading model layers to GPU.
struct llama_model;
struct llama_context;
- typedef int llama_token;
-
- enum llama_log_level {
- LLAMA_LOG_LEVEL_ERROR = 2,
- LLAMA_LOG_LEVEL_WARN = 3,
- LLAMA_LOG_LEVEL_INFO = 4
- };
+ typedef int32_t llama_pos;
+ typedef int32_t llama_token;
+ typedef int32_t llama_seq_id;
enum llama_vocab_type {
LLAMA_VOCAB_TYPE_SPM = 0, // SentencePiece
// model file types
enum llama_ftype {
LLAMA_FTYPE_ALL_F32 = 0,
- LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
- // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
- // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
- LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors
- LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors
+ LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
+ // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
+ // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
+ LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors
LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
};
+ enum llama_rope_scaling_type {
+ LLAMA_ROPE_SCALING_UNSPECIFIED = -1,
+ LLAMA_ROPE_SCALING_NONE = 0,
+ LLAMA_ROPE_SCALING_LINEAR = 1,
+ LLAMA_ROPE_SCALING_YARN = 2,
+ LLAMA_ROPE_SCALING_MAX_VALUE = LLAMA_ROPE_SCALING_YARN,
+ };
+
typedef struct llama_token_data {
llama_token id; // token id
float logit; // log-odds of the token
typedef void (*llama_progress_callback)(float progress, void *ctx);
- struct llama_context_params {
- uint32_t seed; // RNG seed, -1 for random
- int32_t n_ctx; // text context
- int32_t n_batch; // prompt processing batch size
- int32_t n_gpu_layers; // number of layers to store in VRAM
- int32_t main_gpu; // the GPU that is used for scratch and small tensors
-
+ // Input data for llama_decode
+ // A llama_batch object can contain input for one or many sequences
+ // The provided arrays (i.e. token, embd, pos, etc.) must have a size of n_tokens
+ //
+ // - token : the token ids of the input (used when embd is NULL)
+ // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
+ // - pos : the positions of the respective token in the sequence
+ // - seq_id : the sequence to which the respective token belongs
+ // - logits : if zero, the logits for the respective token will not be output
+ //
+ typedef struct llama_batch {
+ int32_t n_tokens;
+
+ llama_token * token;
+ float * embd;
+ llama_pos * pos;
+ int32_t * n_seq_id;
+ llama_seq_id ** seq_id;
+ int8_t * logits;
+
+ // NOTE: helpers for smooth API transition - can be deprecated in the future
+ // for future-proof code, use the above fields instead and ignore everything below
+ //
+ // pos[i] = all_pos_0 + i*all_pos_1
+ //
+ llama_pos all_pos_0; // used if pos == NULL
+ llama_pos all_pos_1; // used if pos == NULL
+ llama_seq_id all_seq_id; // used if seq_id == NULL
+ } llama_batch;
+
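A hedged usage sketch of the struct above, filling a batch manually for a single sequence (`tokens` is illustrative and would come from llama_tokenize; llama_batch_init is declared further below):

// Sketch: one sequence, three tokens, logits requested only for the last one.
llama_batch batch = llama_batch_init(/*n_tokens*/ 3, /*embd*/ 0, /*n_seq_max*/ 1);
batch.n_tokens = 3;
for (int i = 0; i < batch.n_tokens; ++i) {
    batch.token   [i] = tokens[i];
    batch.pos     [i] = i;
    batch.n_seq_id[i] = 1;
    batch.seq_id  [i][0] = 0;
    batch.logits  [i] = i == batch.n_tokens - 1;
}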
+ struct llama_model_params {
+ int32_t n_gpu_layers; // number of layers to store in VRAM
+ int32_t main_gpu; // the GPU that is used for scratch and small tensors
const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES)
- // ref: https://github.com/ggerganov/llama.cpp/pull/2054
- float rope_freq_base; // RoPE base frequency
- float rope_freq_scale; // RoPE frequency scaling factor
-
// called with a progress value between 0 and 1, pass NULL to disable
llama_progress_callback progress_callback;
// context pointer passed to the progress callback
void * progress_callback_user_data;
// Keep the booleans together to avoid misalignment during copy-by-value.
- bool low_vram; // if true, reduce VRAM usage at the cost of performance
- bool mul_mat_q; // if true, use experimental mul_mat_q kernels
- bool f16_kv; // use fp16 for KV cache
- bool logits_all; // the llama_eval() call computes all logits, not just the last one
bool vocab_only; // only load the vocabulary, no weights
bool use_mmap; // use mmap if possible
bool use_mlock; // force system to keep model in RAM
- bool embedding; // embedding mode only
};
- // Signature for logging events
- // Note that text includes the new line character at the end for most events.
- // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
- // if it exists.
- // It might not exist for progress report where '.' is output repeatedly.
- typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
+ struct llama_context_params {
+ uint32_t seed; // RNG seed, -1 for random
+ uint32_t n_ctx; // text context, 0 = from model
+ uint32_t n_batch; // prompt processing maximum batch size
+ uint32_t n_threads; // number of threads to use for generation
+ uint32_t n_threads_batch; // number of threads to use for batch processing
+ int8_t rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
+
+ // ref: https://github.com/ggerganov/llama.cpp/pull/2054
+ float rope_freq_base; // RoPE base frequency, 0 = from model
+ float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
+ float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model
+ float yarn_attn_factor; // YaRN magnitude scaling factor
+ float yarn_beta_fast; // YaRN low correction dim
+ float yarn_beta_slow; // YaRN high correction dim
+ uint32_t yarn_orig_ctx; // YaRN original context size
+
+ // Keep the booleans together to avoid misalignment during copy-by-value.
+ bool mul_mat_q; // if true, use experimental mul_mat_q kernels (DEPRECATED - always true)
+ bool f16_kv; // use fp16 for KV cache, fp32 otherwise
+ bool logits_all; // the llama_eval() call computes all logits, not just the last one
+ bool embedding; // embedding mode only
+ };
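A short sketch of the "0 = from model" convention documented above:

// Sketch: start from the defaults and override only what differs.
llama_context_params cparams = llama_context_default_params();
cparams.n_ctx          = 0;    // 0 = use the model's n_ctx_train
cparams.rope_freq_base = 0.0f; // 0 = use the model's trained RoPE base
cparams.n_threads      = 8;    // illustrative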
// model quantization parameters
typedef struct llama_model_quantize_params {
bool allow_requantize; // allow quantizing non-f32/f16 tensors
bool quantize_output_tensor; // quantize output.weight
bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
+ bool pure; // disable k-quant mixtures and quantize all tensors to the same type
} llama_model_quantize_params;
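A hedged sketch of quantizing with the new `pure` flag (file names are illustrative; `ftype` is the target file type field of the params struct):

llama_model_quantize_params qparams = llama_model_quantize_default_params();
qparams.ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; // target type
qparams.pure  = true;                      // no k-quant mixture - one type for all tensors
llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &qparams);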
// grammar types
int32_t n_eval;
};
+ // Helpers for getting default parameters
+ LLAMA_API struct llama_model_params llama_model_default_params(void);
LLAMA_API struct llama_context_params llama_context_default_params(void);
LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
LLAMA_API struct llama_model * llama_load_model_from_file(
const char * path_model,
- struct llama_context_params params);
+ struct llama_model_params params);
LLAMA_API void llama_free_model(struct llama_model * model);
LLAMA_API bool llama_mmap_supported (void);
LLAMA_API bool llama_mlock_supported(void);
- LLAMA_API int llama_n_vocab (const struct llama_context * ctx);
+ LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
+
LLAMA_API int llama_n_ctx (const struct llama_context * ctx);
- LLAMA_API int llama_n_ctx_train(const struct llama_context * ctx);
- LLAMA_API int llama_n_embd (const struct llama_context * ctx);
- LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_context * ctx);
+ LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_model * model);
+
+ LLAMA_API int llama_n_vocab (const struct llama_model * model);
+ LLAMA_API int llama_n_ctx_train(const struct llama_model * model);
+ LLAMA_API int llama_n_embd (const struct llama_model * model);
- LLAMA_API int llama_model_n_vocab (const struct llama_model * model);
- LLAMA_API int llama_model_n_ctx (const struct llama_model * model);
- LLAMA_API int llama_model_n_ctx_train(const struct llama_model * model);
- LLAMA_API int llama_model_n_embd (const struct llama_model * model);
+ // Get the model's RoPE frequency scaling factor
+ LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
// Get a string describing the model type
LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
+
// Returns the total size of all the tensors in the model in bytes
LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
+
// Returns the total number of parameters in the model
LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
+ // Get a llama model tensor
+ LLAMA_API struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name);
+
// Returns 0 on success
LLAMA_API int llama_model_quantize(
const char * fname_inp,
LLAMA_API DEPRECATED(int llama_apply_lora_from_file(
struct llama_context * ctx,
const char * path_lora,
+ float scale,
const char * path_base_model,
int n_threads),
- "please use llama_model_apply_lora_from_file instead");
+ "use llama_model_apply_lora_from_file instead");
LLAMA_API int llama_model_apply_lora_from_file(
const struct llama_model * model,
- const char * path_lora,
- const char * path_base_model,
- int n_threads);
+ const char * path_lora,
+ float scale,
+ const char * path_base_model,
+ int n_threads);
+
+ //
+ // KV cache
+ //
// Returns the number of tokens in the KV cache
- LLAMA_API int llama_get_kv_cache_token_count(const struct llama_context * ctx);
+ LLAMA_API DEPRECATED(int llama_get_kv_cache_token_count(const struct llama_context * ctx),
+ "avoid using this, it will be removed in the future, instead - count the tokens in user code");
+
+ // Clear the KV cache
+ LLAMA_API void llama_kv_cache_clear(
+ struct llama_context * ctx);
+
+ // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
+ // seq_id < 0 : match any sequence
+ // p0 < 0 : [0, p1]
+ // p1 < 0 : [p0, inf)
+ LLAMA_API void llama_kv_cache_seq_rm(
+ struct llama_context * ctx,
+ llama_seq_id seq_id,
+ llama_pos p0,
+ llama_pos p1);
+
+ // Copy all tokens that belong to the specified sequence to another sequence
+ // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
+ // p0 < 0 : [0, p1]
+ // p1 < 0 : [p0, inf)
+ LLAMA_API void llama_kv_cache_seq_cp(
+ struct llama_context * ctx,
+ llama_seq_id seq_id_src,
+ llama_seq_id seq_id_dst,
+ llama_pos p0,
+ llama_pos p1);
- // Sets the current rng seed.
- LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
+ // Removes all tokens that do not belong to the specified sequence
+ LLAMA_API void llama_kv_cache_seq_keep(
+ struct llama_context * ctx,
+ llama_seq_id seq_id);
+
+ // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
+ // If the KV cache is RoPEd, the KV data is updated accordingly
+ // p0 < 0 : [0, p1]
+ // p1 < 0 : [p0, inf)
+ LLAMA_API void llama_kv_cache_seq_shift(
+ struct llama_context * ctx,
+ llama_seq_id seq_id,
+ llama_pos p0,
+ llama_pos p1,
+ llama_pos delta);
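A sketch of the p0/p1 conventions above - negative bounds are open-ended:

llama_kv_cache_seq_rm   (ctx, /*seq_id*/ 0, /*p0*/ 100, /*p1*/ -1);                // drop [100, inf) from seq 0
llama_kv_cache_seq_cp   (ctx, /*src*/ 0, /*dst*/ 1, /*p0*/ -1, /*p1*/ -1);         // share all of seq 0 with seq 1
llama_kv_cache_seq_shift(ctx, /*seq_id*/ 0, /*p0*/ 10, /*p1*/ -1, /*delta*/ -10);  // slide positions back by 10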
+
+ //
+ // State / sessions
+ //
// Returns the maximum size in bytes of the state (rng, logits, embedding
// and kv_cache) - will often be smaller after compacting tokens
// Copies the state to the specified destination address.
// Destination needs to have allocated enough memory.
// Returns the number of bytes copied
- LLAMA_API size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst);
+ LLAMA_API size_t llama_copy_state_data(
+ struct llama_context * ctx,
+ uint8_t * dst);
// Set the state reading from the specified address
// Returns the number of bytes read
- LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src);
+ LLAMA_API size_t llama_set_state_data(
+ struct llama_context * ctx,
+ uint8_t * src);
// Save/load session file
- LLAMA_API bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out);
- LLAMA_API bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count);
+ LLAMA_API bool llama_load_session_file(
+ struct llama_context * ctx,
+ const char * path_session,
+ llama_token * tokens_out,
+ size_t n_token_capacity,
+ size_t * n_token_count_out);
- // Run the llama inference to obtain the logits and probabilities for the next token.
+ LLAMA_API bool llama_save_session_file(
+ struct llama_context * ctx,
+ const char * path_session,
+ const llama_token * tokens,
+ size_t n_token_count);
+
+ //
+ // Decoding
+ //
+
+ // Run the llama inference to obtain the logits and probabilities for the next token(s).
// tokens + n_tokens is the provided batch of new tokens to process
// n_past is the number of tokens to use from previous eval calls
// Returns 0 on success
- LLAMA_API int llama_eval(
+ // DEPRECATED: use llama_decode() instead
+ LLAMA_API DEPRECATED(int llama_eval(
struct llama_context * ctx,
- const llama_token * tokens,
- int n_tokens,
- int n_past,
- int n_threads);
+ llama_token * tokens,
+ int32_t n_tokens,
+ int n_past),
+ "use llama_decode() instead");
// Same as llama_eval, but use float matrix input directly.
- LLAMA_API int llama_eval_embd(
+ // DEPRECATED: use llama_decode() instead
+ LLAMA_API DEPRECATED(int llama_eval_embd(
struct llama_context * ctx,
- const float * embd,
- int n_tokens,
- int n_past,
- int n_threads);
+ float * embd,
+ int32_t n_tokens,
+ int n_past),
+ "use llama_decode() instead");
+
+ // Return a batch for a single sequence of tokens starting at pos_0
+ //
+ // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
+ //
+ LLAMA_API struct llama_batch llama_batch_get_one(
+ llama_token * tokens,
+ int32_t n_tokens,
+ llama_pos pos_0,
+ llama_seq_id seq_id);
+
+ // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
+ // Each token can be assigned up to n_seq_max sequence ids
+ // The batch has to be freed with llama_batch_free()
+ // If embd != 0, llama_batch.embd will be allocated with a size of n_tokens * embd * sizeof(float)
+ // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
+ // The rest of the llama_batch members are allocated with a size of n_tokens
+ // All members are left uninitialized
+ LLAMA_API struct llama_batch llama_batch_init(
+ int32_t n_tokens,
+ int32_t embd,
+ int32_t n_seq_max);
+
+ // Frees a batch of tokens allocated with llama_batch_init()
+ LLAMA_API void llama_batch_free(struct llama_batch batch);
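A sketch of the allocation contract described above — a token batch (embd == 0) with one sequence id per token, freed by the caller:

    llama_batch batch = llama_batch_init(512, 0, 1);  // up to 512 tokens, no embeddings
    // ... set batch.n_tokens and fill batch.token[i], batch.pos[i], ... before decoding
    llama_batch_free(batch);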
+
+ // Positive return values do not mean a fatal error, but rather a warning.
+ // 0 - success
+ // 1 - could not find a KV slot for the batch (try reducing the size of the batch or increasing the context)
+ // < 0 - error
+ LLAMA_API int llama_decode(
+ struct llama_context * ctx,
+ struct llama_batch batch);
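Putting it together, a hedged sketch of evaluating a prompt through the transitional helper (tokens is an illustrative std::vector<llama_token>):

    llama_batch batch = llama_batch_get_one(tokens.data(), (int32_t) tokens.size(), 0, 0);
    const int ret = llama_decode(ctx, batch);
    if (ret == 1) {
        // warning: no KV slot found - reduce the batch size or grow the context
    } else if (ret < 0) {
        // fatal decode error
    }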
- // Export a static computation graph for context of 511 and batch size of 1
- // NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these
- // parameters here to keep things simple
- // IMPORTANT: do not use for anything else other than debugging and testing!
- LLAMA_API int llama_eval_export(struct llama_context * ctx, const char * fname);
+ // Set the number of threads used for decoding
+ // n_threads is the number of threads used for generation (single token)
+ // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
+ LLAMA_API void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch);
// Token logits obtained from the last call to llama_eval()
// The logits for the last token are stored in the last row
- // Can be mutated in order to change the probabilities of the next token
- // Rows: n_tokens
+ // Logits for which llama_batch.logits[i] == 0 are undefined
+ // Rows: n_tokens provided with llama_batch
// Cols: n_vocab
LLAMA_API float * llama_get_logits(struct llama_context * ctx);
+ // Logits for the ith token. Equivalent to:
+ // llama_get_logits(ctx) + i*n_vocab
+ LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i);
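For example, after a successful llama_decode() the logits of the last token in the batch can be read as follows (assuming llama_batch.logits for that position was enabled):

    const float * logits = llama_get_logits_ith(ctx, batch.n_tokens - 1); // n_vocab values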
+
// Get the embeddings for the input
// shape: [n_embd] (1-dimensional)
LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
// Vocab
//
- LLAMA_API const char * llama_token_get_text(const struct llama_context * ctx, llama_token token);
+ LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token);
- LLAMA_API float llama_token_get_score(const struct llama_context * ctx, llama_token token);
+ LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token);
- LLAMA_API enum llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token);
+ LLAMA_API enum llama_token_type llama_token_get_type(const struct llama_model * model, llama_token token);
// Special tokens
- LLAMA_API llama_token llama_token_bos(const struct llama_context * ctx); // beginning-of-sentence
- LLAMA_API llama_token llama_token_eos(const struct llama_context * ctx); // end-of-sentence
- LLAMA_API llama_token llama_token_nl (const struct llama_context * ctx); // next-line
+ LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence
+ LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence
+ LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line
+
+ // codellama infill tokens
+ LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
+ LLAMA_API llama_token llama_token_middle(const struct llama_model * model); // Beginning of infill middle
+ LLAMA_API llama_token llama_token_suffix(const struct llama_model * model); // Beginning of infill suffix
+ LLAMA_API llama_token llama_token_eot (const struct llama_model * model); // End of infill middle
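A sketch of the conventional fill-in-the-middle prompt layout these tokens are used for (pre and suf are illustrative token vectors):

    std::vector<llama_token> inp;
    inp.push_back(llama_token_prefix(model));
    inp.insert(inp.end(), pre.begin(), pre.end()); // code before the cursor
    inp.push_back(llama_token_suffix(model));
    inp.insert(inp.end(), suf.begin(), suf.end()); // code after the cursor
    inp.push_back(llama_token_middle(model));      // generate until llama_token_eot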
//
// Tokenization
//
- // Convert the provided text into tokens.
- // The tokens pointer must be large enough to hold the resulting tokens.
- // Returns the number of tokens on success, no more than n_max_tokens
- // Returns a negative number on failure - the number of tokens that would have been returned
+ /// @details Convert the provided text into tokens.
+ /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
+ /// @param special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext.
+ /// Does not insert a leading space.
+ /// @return Returns the number of tokens on success, no more than n_max_tokens
+ /// @return Returns a negative number on failure - the number of tokens that would have been returned
LLAMA_API int llama_tokenize(
- struct llama_context * ctx,
- const char * text,
- llama_token * tokens,
- int n_max_tokens,
- bool add_bos);
-
- LLAMA_API int llama_tokenize_with_model(
const struct llama_model * model,
const char * text,
+ int text_len,
llama_token * tokens,
int n_max_tokens,
- bool add_bos);
+ bool add_bos,
+ bool special);
// Token Id -> Piece.
// Uses the vocabulary in the provided context.
// Does not write null terminator to the buffer.
// User code is responsible to remove the leading whitespace of the first non-BOS token when decoding multiple tokens.
LLAMA_API int llama_token_to_piece(
- const struct llama_context * ctx,
- llama_token token,
- char * buf,
- int length);
-
- LLAMA_API int llama_token_to_piece_with_model(
const struct llama_model * model,
llama_token token,
char * buf,
// Sampling functions
//
- /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
- LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty);
+ // Sets the current rng seed.
+ LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
+ /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
/// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
- LLAMA_API void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);
+ LLAMA_API void llama_sample_repetition_penalties(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ const llama_token * last_tokens,
+ size_t penalty_last_n,
+ float penalty_repeat,
+ float penalty_freq,
+ float penalty_present);
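A sketch of a call that folds the old repeat/frequency/presence penalties into one pass (last_tokens and the constants are illustrative):

    llama_sample_repetition_penalties(ctx, &candidates_p,
            last_tokens.data(), last_tokens.size(),
            1.10f,   // penalty_repeat
            0.00f,   // penalty_freq
            0.00f);  // penalty_present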
/// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, the logits must be directly extracted from the original generation context without being sorted.
float scale);
/// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
- LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates);
+ LLAMA_API void llama_sample_softmax(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates);
/// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
- LLAMA_API void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep);
+ LLAMA_API void llama_sample_top_k(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ int k,
+ size_t min_keep);
/// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
- LLAMA_API void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
+ LLAMA_API void llama_sample_top_p(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ float p,
+ size_t min_keep);
+
+ /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
+ LLAMA_API void llama_sample_min_p(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ float p,
+ size_t min_keep);
/// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
- LLAMA_API void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep);
+ LLAMA_API void llama_sample_tail_free(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ float z,
+ size_t min_keep);
/// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
- LLAMA_API void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
- LLAMA_API void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates, float temp);
+ LLAMA_API void llama_sample_typical(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ float p,
+ size_t min_keep);
+
+ LLAMA_API void llama_sample_temp(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ float temp);
+
+ LLAMA_API DEPRECATED(void llama_sample_temperature(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ float temp),
+ "use llama_sample_temp instead");
/// @details Apply constraints from grammar
- LLAMA_API void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar);
+ LLAMA_API void llama_sample_grammar(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ const struct llama_grammar * grammar);
/// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
- LLAMA_API llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu);
+ LLAMA_API llama_token llama_sample_token_mirostat(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ float tau,
+ float eta,
+ int m,
+ float * mu);
/// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
/// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
/// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
/// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
/// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
- LLAMA_API llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu);
+ LLAMA_API llama_token llama_sample_token_mirostat_v2(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ float tau,
+ float eta,
+ float * mu);
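A sketch of the documented `mu` bookkeeping — seeded at 2 * tau and carried across sampling calls (the tau/eta values are illustrative):

    const float tau = 5.0f, eta = 0.1f;
    static float mu = 2.0f * tau; // persists between calls
    llama_token id = llama_sample_token_mirostat_v2(ctx, &candidates_p, tau, eta, &mu);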
/// @details Selects the token with the highest probability.
- LLAMA_API llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates);
+ /// Does not compute the token probabilities. Use llama_sample_softmax() instead.
+ LLAMA_API llama_token llama_sample_token_greedy(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates);
/// @details Randomly selects a token from the candidates based on their probabilities.
- LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);
+ LLAMA_API llama_token llama_sample_token(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates);
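Taken together, a minimal sketch of the usual chain over these primitives (cutoff values are illustrative):

    llama_sample_top_k(ctx, &candidates_p, 40,    1);
    llama_sample_top_p(ctx, &candidates_p, 0.95f, 1);
    llama_sample_temp (ctx, &candidates_p, 0.80f);
    const llama_token id = llama_sample_token(ctx, &candidates_p);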
/// @details Accepts the sampled token into the grammar
- LLAMA_API void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token);
+ LLAMA_API void llama_grammar_accept_token(
+ struct llama_context * ctx,
+ struct llama_grammar * grammar,
+ llama_token token);
//
// Beam search
struct llama_beam_view {
const llama_token * tokens;
+
size_t n_tokens;
- float p; // Cumulative beam probability (renormalized relative to all beams)
- bool eob; // Callback should set this to true when a beam is at end-of-beam.
+ float p; // Cumulative beam probability (renormalized relative to all beams)
+ bool eob; // Callback should set this to true when a beam is at end-of-beam.
};
// Passed to beam_search_callback function.
// These pointers are valid only during the synchronous callback, so should not be saved.
struct llama_beams_state {
struct llama_beam_view * beam_views;
+
size_t n_beams; // Number of elements in beam_views[].
size_t common_prefix_length; // Current max length of prefix tokens shared by all beams.
- bool last_call; // True iff this is the last callback invocation.
+ bool last_call; // True iff this is the last callback invocation.
};
// Type of pointer to the beam_search_callback function.
/// @param n_beams Number of beams to use.
/// @param n_past Number of tokens already evaluated.
/// @param n_predict Maximum number of tokens to predict. EOS may occur earlier.
- /// @param n_threads Number of threads as passed to llama_eval().
- LLAMA_API void llama_beam_search(struct llama_context * ctx, llama_beam_search_callback_fn_t callback, void * callback_data, size_t n_beams, int n_past, int n_predict, int n_threads);
+ LLAMA_API void llama_beam_search(
+ struct llama_context * ctx,
+ llama_beam_search_callback_fn_t callback,
+ void * callback_data,
+ size_t n_beams,
+ int n_past,
+ int n_predict);
// Performance information
LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
+
LLAMA_API void llama_print_timings(struct llama_context * ctx);
LLAMA_API void llama_reset_timings(struct llama_context * ctx);
// Set callback for all future logging events.
// If this is not called, or NULL is supplied, everything is output on stderr.
- LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
+ LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);
LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);
struct ggml_tensor;
-const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);
+const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
+ struct llama_context * ctx
+);
#endif // LLAMA_API_INTERNAL
#include <regex>
std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) {
- // initialize to prompt numer of chars, since n_tokens <= n_prompt_chars
- std::vector<llama_token> res(text.size() + (int)add_bos);
- int n = llama_tokenize(ctx, text.c_str(), res.data(), res.size(), add_bos);
- assert(n >= 0);
- res.resize(n);
- return res;
+ auto * model = llama_get_model(ctx);
+ // upper limit for the number of tokens
+ int n_tokens = text.length() + add_bos;
+ std::vector<llama_token> result(n_tokens);
+ n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, false);
+ if (n_tokens < 0) {
+ result.resize(-n_tokens);
+ int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, false);
+ GGML_ASSERT(check == -n_tokens);
+ } else {
+ result.resize(n_tokens);
+ }
+ return result;
}
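Usage sketch of the helper above; the grow-and-retry branch relies on the negative return value encoding the required token count:

    std::vector<llama_token> toks = llama_tokenize(ctx, "Hello world", /*add_bos=*/true);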
std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
std::vector<char> result(8, 0);
- const int n_tokens = llama_token_to_piece(ctx, token, result.data(), result.size());
+ const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
if (n_tokens < 0) {
result.resize(-n_tokens);
- int check = llama_token_to_piece(ctx, token, result.data(), result.size());
+ int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
GGML_ASSERT(check == -n_tokens);
} else {
result.resize(n_tokens);
llama_backend_init(true);
- auto lparams = llama_context_default_params();
- // tune these to your liking
- lparams.n_ctx = 2048;
- lparams.seed = 1;
- lparams.f16_kv = true;
- struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lparams);
+ auto lmparams = llama_model_default_params();
+
+ struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);
+
+ llama_context_params lcparams = llama_context_default_params();
+
+ // tune these to your liking
+ lcparams.n_ctx = 2048;
+ lcparams.seed = 1;
+ lcparams.f16_kv = true;
+ lcparams.n_threads = params.n_threads;
- struct llama_context * ctx_llama = llama_new_context_with_model(model_llama, lparams);
+ struct llama_context * ctx_llama = llama_new_context_with_model(model_llama, lcparams);
// print some info about the processing
{
if (fp != NULL) {
std::fclose(fp);
- session_tokens.resize(lparams.n_ctx);
+ session_tokens.resize(llama_n_ctx(ctx_llama));
size_t n_token_count_out = 0;
if (!llama_load_session_file(ctx_llama, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
fprintf(stderr, "%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
printf("\n");
printf("%s : initializing - please wait ...\n", __func__);
- if (llama_eval(ctx_llama, embd_inp.data(), embd_inp.size(), 0, params.n_threads)) {
+ if (llama_eval(ctx_llama, embd_inp.data(), embd_inp.size(), 0)) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return 1;
}
n_session_consumed = session_tokens.size();
}
- if (llama_eval(ctx_llama, embd.data(), embd.size(), n_past, params.n_threads)) {
+ if (llama_eval(ctx_llama, embd.data(), embd.size(), n_past)) {
fprintf(stderr, "%s : failed to eval\n", __func__);
return 1;
}
{
auto logits = llama_get_logits(ctx_llama);
- auto n_vocab = llama_n_vocab(ctx_llama);
+ auto n_vocab = llama_n_vocab(model_llama);
- logits[llama_token_eos(ctx_llama)] = 0;
+ logits[llama_token_eos(model_llama)] = 0;
std::vector<llama_token_data> candidates;
candidates.reserve(n_vocab);
llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
// apply repeat penalty
- const float nl_logit = logits[llama_token_nl(ctx_llama)];
+ const float nl_logit = logits[llama_token_nl(model_llama)];
- llama_sample_repetition_penalty(ctx_llama, &candidates_p,
+ llama_sample_repetition_penalties(ctx_llama, &candidates_p,
embd_inp.data() + std::max(0, n_past - repeat_last_n),
- repeat_last_n, repeat_penalty);
+ repeat_last_n, repeat_penalty, 0.0f, 0.0f);
- logits[llama_token_nl(ctx_llama)] = nl_logit;
+ logits[llama_token_nl(model_llama)] = nl_logit;
if (temp <= 0) {
// Greedy sampling
// Temperature sampling
llama_sample_top_k(ctx_llama, &candidates_p, top_k, 1);
llama_sample_top_p(ctx_llama, &candidates_p, top_p, 1);
- llama_sample_temperature(ctx_llama, &candidates_p, temp);
+ llama_sample_temp (ctx_llama, &candidates_p, temp);
id = llama_sample_token(ctx_llama, &candidates_p);
}
}
- if (id != llama_token_eos(ctx_llama)) {
+ if (id != llama_token_eos(model_llama)) {
// add it to the context
embd.push_back(id);
--- /dev/null
+#pragma once
+
+#include <cassert>
+#include <stdexcept>
+#include <string>
+#include <vector>
+#include <unordered_map>
+
+static const std::vector<std::pair<uint32_t, uint32_t>> digit_ranges = {
+{0x30, 0x39}, {0xB2, 0xB3}, {0xB9, 0xB9}, {0x660, 0x669}, {0x6F0, 0x6F9}, {0x7C0, 0x7C9}, {0x966, 0x96F}, {0x9E6, 0x9EF}, {0xA66, 0xA6F}, {0xAE6, 0xAEF}, {0xB66, 0xB6F}, {0xBE6, 0xBEF}, {0xC66, 0xC6F},
+{0xCE6, 0xCEF}, {0xD66, 0xD6F}, {0xDE6, 0xDEF}, {0xE50, 0xE59}, {0xED0, 0xED9}, {0xF20, 0xF29}, {0x1040, 0x1049}, {0x1090, 0x1099}, {0x1369, 0x1371}, {0x17E0, 0x17E9}, {0x1810, 0x1819}, {0x1946, 0x194F},
+{0x19D0, 0x19DA}, {0x1A80, 0x1A89}, {0x1A90, 0x1A99}, {0x1B50, 0x1B59}, {0x1BB0, 0x1BB9}, {0x1C40, 0x1C49}, {0x1C50, 0x1C59}, {0x2070, 0x2070}, {0x2074, 0x2079}, {0x2080, 0x2089}, {0x2460, 0x2468},
+{0x2474, 0x247C}, {0x2488, 0x2490}, {0x24EA, 0x24EA}, {0x24F5, 0x24FD}, {0x24FF, 0x24FF}, {0x2776, 0x277E}, {0x2780, 0x2788}, {0x278A, 0x2792}, {0xA620, 0xA629}, {0xA8D0, 0xA8D9}, {0xA900, 0xA909},
+{0xA9D0, 0xA9D9}, {0xA9F0, 0xA9F9}, {0xAA50, 0xAA59}, {0xABF0, 0xABF9}, {0xFF10, 0xFF19}, {0x104A0, 0x104A9}, {0x10A40, 0x10A43}, {0x10D30, 0x10D39}, {0x10E60, 0x10E68}, {0x11052, 0x1105A},
+{0x11066, 0x1106F}, {0x110F0, 0x110F9}, {0x11136, 0x1113F}, {0x111D0, 0x111D9}, {0x112F0, 0x112F9}, {0x11450, 0x11459}, {0x114D0, 0x114D9}, {0x11650, 0x11659}, {0x116C0, 0x116C9}, {0x11730, 0x11739},
+{0x118E0, 0x118E9}, {0x11950, 0x11959}, {0x11C50, 0x11C59}, {0x11D50, 0x11D59}, {0x11DA0, 0x11DA9}, {0x16A60, 0x16A69}, {0x16B50, 0x16B59}, {0x1D7CE, 0x1D7FF}, {0x1E140, 0x1E149}, {0x1E2F0, 0x1E2F9},
+{0x1E950, 0x1E959}, {0x1F100, 0x1F10A}, {0x1FBF0, 0x1FBF9},
+};
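Each table is sorted by lower bound and non-overlapping, so membership can be tested with a binary search; a hedged sketch (this helper is illustrative, not part of the header, and assumes <algorithm> is available):

    static bool codepoint_in_ranges(uint32_t cp,
            const std::vector<std::pair<uint32_t, uint32_t>> & ranges) {
        // first range whose lower bound is strictly greater than cp
        auto it = std::upper_bound(ranges.begin(), ranges.end(), cp,
            [](uint32_t v, const std::pair<uint32_t, uint32_t> & r) { return v < r.first; });
        return it != ranges.begin() && cp <= std::prev(it)->second;
    }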
+
+static const std::vector<std::pair<uint32_t, uint32_t>> letter_ranges = {
+{0x41, 0x5A}, {0x61, 0x7A}, {0xAA, 0xAA}, {0xB5, 0xB5}, {0xBA, 0xBA}, {0xC0, 0xD6}, {0xD8, 0xF6}, {0xF8, 0x2C1}, {0x2C6, 0x2D1}, {0x2E0, 0x2E4}, {0x2EC, 0x2EC}, {0x2EE, 0x2EE}, {0x370, 0x374},
+{0x376, 0x377}, {0x37A, 0x37D}, {0x37F, 0x37F}, {0x386, 0x386}, {0x388, 0x38A}, {0x38C, 0x38C}, {0x38E, 0x3A1}, {0x3A3, 0x3F5}, {0x3F7, 0x481}, {0x48A, 0x52F}, {0x531, 0x556}, {0x559, 0x559},
+{0x560, 0x588}, {0x5D0, 0x5EA}, {0x5EF, 0x5F2}, {0x620, 0x64A}, {0x66E, 0x66F}, {0x671, 0x6D3}, {0x6D5, 0x6D5}, {0x6E5, 0x6E6}, {0x6EE, 0x6EF}, {0x6FA, 0x6FC}, {0x6FF, 0x6FF}, {0x710, 0x710},
+{0x712, 0x72F}, {0x74D, 0x7A5}, {0x7B1, 0x7B1}, {0x7CA, 0x7EA}, {0x7F4, 0x7F5}, {0x7FA, 0x7FA}, {0x800, 0x815}, {0x81A, 0x81A}, {0x824, 0x824}, {0x828, 0x828}, {0x840, 0x858}, {0x860, 0x86A},
+{0x8A0, 0x8B4}, {0x8B6, 0x8C7}, {0x904, 0x939}, {0x93D, 0x93D}, {0x950, 0x950}, {0x958, 0x961}, {0x971, 0x980}, {0x985, 0x98C}, {0x98F, 0x990}, {0x993, 0x9A8}, {0x9AA, 0x9B0}, {0x9B2, 0x9B2},
+{0x9B6, 0x9B9}, {0x9BD, 0x9BD}, {0x9CE, 0x9CE}, {0x9DC, 0x9DD}, {0x9DF, 0x9E1}, {0x9F0, 0x9F1}, {0x9FC, 0x9FC}, {0xA05, 0xA0A}, {0xA0F, 0xA10}, {0xA13, 0xA28}, {0xA2A, 0xA30}, {0xA32, 0xA33},
+{0xA35, 0xA36}, {0xA38, 0xA39}, {0xA59, 0xA5C}, {0xA5E, 0xA5E}, {0xA72, 0xA74}, {0xA85, 0xA8D}, {0xA8F, 0xA91}, {0xA93, 0xAA8}, {0xAAA, 0xAB0}, {0xAB2, 0xAB3}, {0xAB5, 0xAB9}, {0xABD, 0xABD},
+{0xAD0, 0xAD0}, {0xAE0, 0xAE1}, {0xAF9, 0xAF9}, {0xB05, 0xB0C}, {0xB0F, 0xB10}, {0xB13, 0xB28}, {0xB2A, 0xB30}, {0xB32, 0xB33}, {0xB35, 0xB39}, {0xB3D, 0xB3D}, {0xB5C, 0xB5D}, {0xB5F, 0xB61},
+{0xB71, 0xB71}, {0xB83, 0xB83}, {0xB85, 0xB8A}, {0xB8E, 0xB90}, {0xB92, 0xB95}, {0xB99, 0xB9A}, {0xB9C, 0xB9C}, {0xB9E, 0xB9F}, {0xBA3, 0xBA4}, {0xBA8, 0xBAA}, {0xBAE, 0xBB9}, {0xBD0, 0xBD0},
+{0xC05, 0xC0C}, {0xC0E, 0xC10}, {0xC12, 0xC28}, {0xC2A, 0xC39}, {0xC3D, 0xC3D}, {0xC58, 0xC5A}, {0xC60, 0xC61}, {0xC80, 0xC80}, {0xC85, 0xC8C}, {0xC8E, 0xC90}, {0xC92, 0xCA8}, {0xCAA, 0xCB3},
+{0xCB5, 0xCB9}, {0xCBD, 0xCBD}, {0xCDE, 0xCDE}, {0xCE0, 0xCE1}, {0xCF1, 0xCF2}, {0xD04, 0xD0C}, {0xD0E, 0xD10}, {0xD12, 0xD3A}, {0xD3D, 0xD3D}, {0xD4E, 0xD4E}, {0xD54, 0xD56}, {0xD5F, 0xD61},
+{0xD7A, 0xD7F}, {0xD85, 0xD96}, {0xD9A, 0xDB1}, {0xDB3, 0xDBB}, {0xDBD, 0xDBD}, {0xDC0, 0xDC6}, {0xE01, 0xE30}, {0xE32, 0xE33}, {0xE40, 0xE46}, {0xE81, 0xE82}, {0xE84, 0xE84}, {0xE86, 0xE8A},
+{0xE8C, 0xEA3}, {0xEA5, 0xEA5}, {0xEA7, 0xEB0}, {0xEB2, 0xEB3}, {0xEBD, 0xEBD}, {0xEC0, 0xEC4}, {0xEC6, 0xEC6}, {0xEDC, 0xEDF}, {0xF00, 0xF00}, {0xF40, 0xF47}, {0xF49, 0xF6C}, {0xF88, 0xF8C},
+{0x1000, 0x102A}, {0x103F, 0x103F}, {0x1050, 0x1055}, {0x105A, 0x105D}, {0x1061, 0x1061}, {0x1065, 0x1066}, {0x106E, 0x1070}, {0x1075, 0x1081}, {0x108E, 0x108E}, {0x10A0, 0x10C5}, {0x10C7, 0x10C7},
+{0x10CD, 0x10CD}, {0x10D0, 0x10FA}, {0x10FC, 0x1248}, {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258}, {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D}, {0x1290, 0x12B0}, {0x12B2, 0x12B5},
+{0x12B8, 0x12BE}, {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6}, {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A}, {0x1380, 0x138F}, {0x13A0, 0x13F5}, {0x13F8, 0x13FD}, {0x1401, 0x166C},
+{0x166F, 0x167F}, {0x1681, 0x169A}, {0x16A0, 0x16EA}, {0x16F1, 0x16F8}, {0x1700, 0x170C}, {0x170E, 0x1711}, {0x1720, 0x1731}, {0x1740, 0x1751}, {0x1760, 0x176C}, {0x176E, 0x1770}, {0x1780, 0x17B3},
+{0x17D7, 0x17D7}, {0x17DC, 0x17DC}, {0x1820, 0x1878}, {0x1880, 0x1884}, {0x1887, 0x18A8}, {0x18AA, 0x18AA}, {0x18B0, 0x18F5}, {0x1900, 0x191E}, {0x1950, 0x196D}, {0x1970, 0x1974}, {0x1980, 0x19AB},
+{0x19B0, 0x19C9}, {0x1A00, 0x1A16}, {0x1A20, 0x1A54}, {0x1AA7, 0x1AA7}, {0x1B05, 0x1B33}, {0x1B45, 0x1B4B}, {0x1B83, 0x1BA0}, {0x1BAE, 0x1BAF}, {0x1BBA, 0x1BE5}, {0x1C00, 0x1C23}, {0x1C4D, 0x1C4F},
+{0x1C5A, 0x1C7D}, {0x1C80, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CBF}, {0x1CE9, 0x1CEC}, {0x1CEE, 0x1CF3}, {0x1CF5, 0x1CF6}, {0x1CFA, 0x1CFA}, {0x1D00, 0x1DBF}, {0x1E00, 0x1F15}, {0x1F18, 0x1F1D},
+{0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, {0x1FB6, 0x1FBC}, {0x1FBE, 0x1FBE}, {0x1FC2, 0x1FC4},
+{0x1FC6, 0x1FCC}, {0x1FD0, 0x1FD3}, {0x1FD6, 0x1FDB}, {0x1FE0, 0x1FEC}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFC}, {0x2071, 0x2071}, {0x207F, 0x207F}, {0x2090, 0x209C}, {0x2102, 0x2102}, {0x2107, 0x2107},
+{0x210A, 0x2113}, {0x2115, 0x2115}, {0x2119, 0x211D}, {0x2124, 0x2124}, {0x2126, 0x2126}, {0x2128, 0x2128}, {0x212A, 0x212D}, {0x212F, 0x2139}, {0x213C, 0x213F}, {0x2145, 0x2149}, {0x214E, 0x214E},
+{0x2183, 0x2184}, {0x2C00, 0x2C2E}, {0x2C30, 0x2C5E}, {0x2C60, 0x2CE4}, {0x2CEB, 0x2CEE}, {0x2CF2, 0x2CF3}, {0x2D00, 0x2D25}, {0x2D27, 0x2D27}, {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D6F},
+{0x2D80, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, {0x2E2F, 0x2E2F}, {0x3005, 0x3006},
+{0x3031, 0x3035}, {0x303B, 0x303C}, {0x3041, 0x3096}, {0x309D, 0x309F}, {0x30A1, 0x30FA}, {0x30FC, 0x30FF}, {0x3105, 0x312F}, {0x3131, 0x318E}, {0x31A0, 0x31BF}, {0x31F0, 0x31FF}, {0x3400, 0x4DBF},
+{0x4E00, 0x9FFC}, {0xA000, 0xA48C}, {0xA4D0, 0xA4FD}, {0xA500, 0xA60C}, {0xA610, 0xA61F}, {0xA62A, 0xA62B}, {0xA640, 0xA66E}, {0xA67F, 0xA69D}, {0xA6A0, 0xA6E5}, {0xA717, 0xA71F}, {0xA722, 0xA788},
+{0xA78B, 0xA7BF}, {0xA7C2, 0xA7CA}, {0xA7F5, 0xA801}, {0xA803, 0xA805}, {0xA807, 0xA80A}, {0xA80C, 0xA822}, {0xA840, 0xA873}, {0xA882, 0xA8B3}, {0xA8F2, 0xA8F7}, {0xA8FB, 0xA8FB}, {0xA8FD, 0xA8FE},
+{0xA90A, 0xA925}, {0xA930, 0xA946}, {0xA960, 0xA97C}, {0xA984, 0xA9B2}, {0xA9CF, 0xA9CF}, {0xA9E0, 0xA9E4}, {0xA9E6, 0xA9EF}, {0xA9FA, 0xA9FE}, {0xAA00, 0xAA28}, {0xAA40, 0xAA42}, {0xAA44, 0xAA4B},
+{0xAA60, 0xAA76}, {0xAA7A, 0xAA7A}, {0xAA7E, 0xAAAF}, {0xAAB1, 0xAAB1}, {0xAAB5, 0xAAB6}, {0xAAB9, 0xAABD}, {0xAAC0, 0xAAC0}, {0xAAC2, 0xAAC2}, {0xAADB, 0xAADD}, {0xAAE0, 0xAAEA}, {0xAAF2, 0xAAF4},
+{0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, {0xAB30, 0xAB5A}, {0xAB5C, 0xAB69}, {0xAB70, 0xABE2}, {0xAC00, 0xD7A3}, {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB},
+{0xF900, 0xFA6D}, {0xFA70, 0xFAD9}, {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB1D}, {0xFB1F, 0xFB28}, {0xFB2A, 0xFB36}, {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, {0xFB43, 0xFB44},
+{0xFB46, 0xFBB1}, {0xFBD3, 0xFD3D}, {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFB}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, {0xFF21, 0xFF3A}, {0xFF41, 0xFF5A}, {0xFF66, 0xFFBE}, {0xFFC2, 0xFFC7},
+{0xFFCA, 0xFFCF}, {0xFFD2, 0xFFD7}, {0xFFDA, 0xFFDC}, {0x10000, 0x1000B}, {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D}, {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA},
+{0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x10300, 0x1031F}, {0x1032D, 0x10340}, {0x10342, 0x10349}, {0x10350, 0x10375}, {0x10380, 0x1039D}, {0x103A0, 0x103C3}, {0x103C8, 0x103CF}, {0x10400, 0x1049D},
+{0x104B0, 0x104D3}, {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, {0x10600, 0x10736}, {0x10740, 0x10755}, {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, {0x1080A, 0x10835},
+{0x10837, 0x10838}, {0x1083C, 0x1083C}, {0x1083F, 0x10855}, {0x10860, 0x10876}, {0x10880, 0x1089E}, {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x10900, 0x10915}, {0x10920, 0x10939}, {0x10980, 0x109B7},
+{0x109BE, 0x109BF}, {0x10A00, 0x10A00}, {0x10A10, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35}, {0x10A60, 0x10A7C}, {0x10A80, 0x10A9C}, {0x10AC0, 0x10AC7}, {0x10AC9, 0x10AE4}, {0x10B00, 0x10B35},
+{0x10B40, 0x10B55}, {0x10B60, 0x10B72}, {0x10B80, 0x10B91}, {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, {0x10D00, 0x10D23}, {0x10E80, 0x10EA9}, {0x10EB0, 0x10EB1}, {0x10F00, 0x10F1C},
+{0x10F27, 0x10F27}, {0x10F30, 0x10F45}, {0x10FB0, 0x10FC4}, {0x10FE0, 0x10FF6}, {0x11003, 0x11037}, {0x11083, 0x110AF}, {0x110D0, 0x110E8}, {0x11103, 0x11126}, {0x11144, 0x11144}, {0x11147, 0x11147},
+{0x11150, 0x11172}, {0x11176, 0x11176}, {0x11183, 0x111B2}, {0x111C1, 0x111C4}, {0x111DA, 0x111DA}, {0x111DC, 0x111DC}, {0x11200, 0x11211}, {0x11213, 0x1122B}, {0x11280, 0x11286}, {0x11288, 0x11288},
+{0x1128A, 0x1128D}, {0x1128F, 0x1129D}, {0x1129F, 0x112A8}, {0x112B0, 0x112DE}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, {0x11335, 0x11339},
+{0x1133D, 0x1133D}, {0x11350, 0x11350}, {0x1135D, 0x11361}, {0x11400, 0x11434}, {0x11447, 0x1144A}, {0x1145F, 0x11461}, {0x11480, 0x114AF}, {0x114C4, 0x114C5}, {0x114C7, 0x114C7}, {0x11580, 0x115AE},
+{0x115D8, 0x115DB}, {0x11600, 0x1162F}, {0x11644, 0x11644}, {0x11680, 0x116AA}, {0x116B8, 0x116B8}, {0x11700, 0x1171A}, {0x11800, 0x1182B}, {0x118A0, 0x118DF}, {0x118FF, 0x11906}, {0x11909, 0x11909},
+{0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x1192F}, {0x1193F, 0x1193F}, {0x11941, 0x11941}, {0x119A0, 0x119A7}, {0x119AA, 0x119D0}, {0x119E1, 0x119E1}, {0x119E3, 0x119E3}, {0x11A00, 0x11A00},
+{0x11A0B, 0x11A32}, {0x11A3A, 0x11A3A}, {0x11A50, 0x11A50}, {0x11A5C, 0x11A89}, {0x11A9D, 0x11A9D}, {0x11AC0, 0x11AF8}, {0x11C00, 0x11C08}, {0x11C0A, 0x11C2E}, {0x11C40, 0x11C40}, {0x11C72, 0x11C8F},
+{0x11D00, 0x11D06}, {0x11D08, 0x11D09}, {0x11D0B, 0x11D30}, {0x11D46, 0x11D46}, {0x11D60, 0x11D65}, {0x11D67, 0x11D68}, {0x11D6A, 0x11D89}, {0x11D98, 0x11D98}, {0x11EE0, 0x11EF2}, {0x11FB0, 0x11FB0},
+{0x12000, 0x12399}, {0x12480, 0x12543}, {0x13000, 0x1342E}, {0x14400, 0x14646}, {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16AD0, 0x16AED}, {0x16B00, 0x16B2F}, {0x16B40, 0x16B43}, {0x16B63, 0x16B77},
+{0x16B7D, 0x16B8F}, {0x16E40, 0x16E7F}, {0x16F00, 0x16F4A}, {0x16F50, 0x16F50}, {0x16F93, 0x16F9F}, {0x16FE0, 0x16FE1}, {0x16FE3, 0x16FE3}, {0x17000, 0x187F7}, {0x18800, 0x18CD5}, {0x18D00, 0x18D08},
+{0x1B000, 0x1B11E}, {0x1B150, 0x1B152}, {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1D400, 0x1D454}, {0x1D456, 0x1D49C},
+{0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514},
+{0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D6C0}, {0x1D6C2, 0x1D6DA}, {0x1D6DC, 0x1D6FA},
+{0x1D6FC, 0x1D714}, {0x1D716, 0x1D734}, {0x1D736, 0x1D74E}, {0x1D750, 0x1D76E}, {0x1D770, 0x1D788}, {0x1D78A, 0x1D7A8}, {0x1D7AA, 0x1D7C2}, {0x1D7C4, 0x1D7CB}, {0x1E100, 0x1E12C}, {0x1E137, 0x1E13D},
+{0x1E14E, 0x1E14E}, {0x1E2C0, 0x1E2EB}, {0x1E800, 0x1E8C4}, {0x1E900, 0x1E943}, {0x1E94B, 0x1E94B}, {0x1EE00, 0x1EE03}, {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27},
+{0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37}, {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52},
+{0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72},
+{0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, {0x20000, 0x2A6DD}, {0x2A700, 0x2B734},
+{0x2B740, 0x2B81D}, {0x2B820, 0x2CEA1}, {0x2CEB0, 0x2EBE0}, {0x2F800, 0x2FA1D}, {0x30000, 0x3134A},
+};
+
+static const std::vector<std::pair<uint32_t, uint32_t>> whitespace_ranges = {
+{0x9, 0xD}, {0x1C, 0x20}, {0x85, 0x85}, {0xA0, 0xA0}, {0x1680, 0x1680}, {0x2000, 0x200A}, {0x2028, 0x2029}, {0x202F, 0x202F}, {0x205F, 0x205F}, {0x3000, 0x3000},
+};
+
+static const std::vector<std::pair<uint32_t, uint32_t>> accent_mark_ranges = {
+{0x300, 0x36F}, {0x483, 0x489}, {0x591, 0x5BD}, {0x5BF, 0x5BF}, {0x5C1, 0x5C2}, {0x5C4, 0x5C5}, {0x5C7, 0x5C7}, {0x610, 0x61A}, {0x64B, 0x65F}, {0x670, 0x670}, {0x6D6, 0x6DC}, {0x6DF, 0x6E4},
+{0x6E7, 0x6E8}, {0x6EA, 0x6ED}, {0x711, 0x711}, {0x730, 0x74A}, {0x7A6, 0x7B0}, {0x7EB, 0x7F3}, {0x7FD, 0x7FD}, {0x816, 0x819}, {0x81B, 0x823}, {0x825, 0x827}, {0x829, 0x82D}, {0x859, 0x85B},
+{0x8D3, 0x8E1}, {0x8E3, 0x903}, {0x93A, 0x93C}, {0x93E, 0x94F}, {0x951, 0x957}, {0x962, 0x963}, {0x981, 0x983}, {0x9BC, 0x9BC}, {0x9BE, 0x9C4}, {0x9C7, 0x9C8}, {0x9CB, 0x9CD}, {0x9D7, 0x9D7},
+{0x9E2, 0x9E3}, {0x9FE, 0x9FE}, {0xA01, 0xA03}, {0xA3C, 0xA3C}, {0xA3E, 0xA42}, {0xA47, 0xA48}, {0xA4B, 0xA4D}, {0xA51, 0xA51}, {0xA70, 0xA71}, {0xA75, 0xA75}, {0xA81, 0xA83}, {0xABC, 0xABC},
+{0xABE, 0xAC5}, {0xAC7, 0xAC9}, {0xACB, 0xACD}, {0xAE2, 0xAE3}, {0xAFA, 0xAFF}, {0xB01, 0xB03}, {0xB3C, 0xB3C}, {0xB3E, 0xB44}, {0xB47, 0xB48}, {0xB4B, 0xB4D}, {0xB55, 0xB57}, {0xB62, 0xB63},
+{0xB82, 0xB82}, {0xBBE, 0xBC2}, {0xBC6, 0xBC8}, {0xBCA, 0xBCD}, {0xBD7, 0xBD7}, {0xC00, 0xC04}, {0xC3E, 0xC44}, {0xC46, 0xC48}, {0xC4A, 0xC4D}, {0xC55, 0xC56}, {0xC62, 0xC63}, {0xC81, 0xC83},
+{0xCBC, 0xCBC}, {0xCBE, 0xCC4}, {0xCC6, 0xCC8}, {0xCCA, 0xCCD}, {0xCD5, 0xCD6}, {0xCE2, 0xCE3}, {0xD00, 0xD03}, {0xD3B, 0xD3C}, {0xD3E, 0xD44}, {0xD46, 0xD48}, {0xD4A, 0xD4D}, {0xD57, 0xD57},
+{0xD62, 0xD63}, {0xD81, 0xD83}, {0xDCA, 0xDCA}, {0xDCF, 0xDD4}, {0xDD6, 0xDD6}, {0xDD8, 0xDDF}, {0xDF2, 0xDF3}, {0xE31, 0xE31}, {0xE34, 0xE3A}, {0xE47, 0xE4E}, {0xEB1, 0xEB1}, {0xEB4, 0xEBC},
+{0xEC8, 0xECD}, {0xF18, 0xF19}, {0xF35, 0xF35}, {0xF37, 0xF37}, {0xF39, 0xF39}, {0xF3E, 0xF3F}, {0xF71, 0xF84}, {0xF86, 0xF87}, {0xF8D, 0xF97}, {0xF99, 0xFBC}, {0xFC6, 0xFC6}, {0x102B, 0x103E},
+{0x1056, 0x1059}, {0x105E, 0x1060}, {0x1062, 0x1064}, {0x1067, 0x106D}, {0x1071, 0x1074}, {0x1082, 0x108D}, {0x108F, 0x108F}, {0x109A, 0x109D}, {0x135D, 0x135F}, {0x1712, 0x1714}, {0x1732, 0x1734},
+{0x1752, 0x1753}, {0x1772, 0x1773}, {0x17B4, 0x17D3}, {0x17DD, 0x17DD}, {0x180B, 0x180D}, {0x1885, 0x1886}, {0x18A9, 0x18A9}, {0x1920, 0x192B}, {0x1930, 0x193B}, {0x1A17, 0x1A1B}, {0x1A55, 0x1A5E},
+{0x1A60, 0x1A7C}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0}, {0x1B00, 0x1B04}, {0x1B34, 0x1B44}, {0x1B6B, 0x1B73}, {0x1B80, 0x1B82}, {0x1BA1, 0x1BAD}, {0x1BE6, 0x1BF3}, {0x1C24, 0x1C37}, {0x1CD0, 0x1CD2},
+{0x1CD4, 0x1CE8}, {0x1CED, 0x1CED}, {0x1CF4, 0x1CF4}, {0x1CF7, 0x1CF9}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF}, {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2D7F, 0x2D7F}, {0x2DE0, 0x2DFF}, {0x302A, 0x302F},
+{0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA802, 0xA802}, {0xA806, 0xA806}, {0xA80B, 0xA80B}, {0xA823, 0xA827}, {0xA82C, 0xA82C}, {0xA880, 0xA881},
+{0xA8B4, 0xA8C5}, {0xA8E0, 0xA8F1}, {0xA8FF, 0xA8FF}, {0xA926, 0xA92D}, {0xA947, 0xA953}, {0xA980, 0xA983}, {0xA9B3, 0xA9C0}, {0xA9E5, 0xA9E5}, {0xAA29, 0xAA36}, {0xAA43, 0xAA43}, {0xAA4C, 0xAA4D},
+{0xAA7B, 0xAA7D}, {0xAAB0, 0xAAB0}, {0xAAB2, 0xAAB4}, {0xAAB7, 0xAAB8}, {0xAABE, 0xAABF}, {0xAAC1, 0xAAC1}, {0xAAEB, 0xAAEF}, {0xAAF5, 0xAAF6}, {0xABE3, 0xABEA}, {0xABEC, 0xABED}, {0xFB1E, 0xFB1E},
+{0xFE00, 0xFE0F}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x102E0, 0x102E0}, {0x10376, 0x1037A}, {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F},
+{0x10AE5, 0x10AE6}, {0x10D24, 0x10D27}, {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11000, 0x11002}, {0x11038, 0x11046}, {0x1107F, 0x11082}, {0x110B0, 0x110BA}, {0x11100, 0x11102}, {0x11127, 0x11134},
+{0x11145, 0x11146}, {0x11173, 0x11173}, {0x11180, 0x11182}, {0x111B3, 0x111C0}, {0x111C9, 0x111CC}, {0x111CE, 0x111CF}, {0x1122C, 0x11237}, {0x1123E, 0x1123E}, {0x112DF, 0x112EA}, {0x11300, 0x11303},
+{0x1133B, 0x1133C}, {0x1133E, 0x11344}, {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11357, 0x11357}, {0x11362, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11435, 0x11446}, {0x1145E, 0x1145E},
+{0x114B0, 0x114C3}, {0x115AF, 0x115B5}, {0x115B8, 0x115C0}, {0x115DC, 0x115DD}, {0x11630, 0x11640}, {0x116AB, 0x116B7}, {0x1171D, 0x1172B}, {0x1182C, 0x1183A}, {0x11930, 0x11935}, {0x11937, 0x11938},
+{0x1193B, 0x1193E}, {0x11940, 0x11940}, {0x11942, 0x11943}, {0x119D1, 0x119D7}, {0x119DA, 0x119E0}, {0x119E4, 0x119E4}, {0x11A01, 0x11A0A}, {0x11A33, 0x11A39}, {0x11A3B, 0x11A3E}, {0x11A47, 0x11A47},
+{0x11A51, 0x11A5B}, {0x11A8A, 0x11A99}, {0x11C2F, 0x11C36}, {0x11C38, 0x11C3F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, {0x11D31, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D}, {0x11D3F, 0x11D45},
+{0x11D47, 0x11D47}, {0x11D8A, 0x11D8E}, {0x11D90, 0x11D91}, {0x11D93, 0x11D97}, {0x11EF3, 0x11EF6}, {0x16AF0, 0x16AF4}, {0x16B30, 0x16B36}, {0x16F4F, 0x16F4F}, {0x16F51, 0x16F87}, {0x16F8F, 0x16F92},
+{0x16FE4, 0x16FE4}, {0x16FF0, 0x16FF1}, {0x1BC9D, 0x1BC9E}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172}, {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1DA00, 0x1DA36},
+{0x1DA3B, 0x1DA6C}, {0x1DA75, 0x1DA75}, {0x1DA84, 0x1DA84}, {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A},
+{0x1E130, 0x1E136}, {0x1E2EC, 0x1E2EF}, {0x1E8D0, 0x1E8D6}, {0x1E944, 0x1E94A}, {0xE0100, 0xE01EF},
+};
+
+static const std::vector<std::pair<uint32_t, uint32_t>> punctuation_ranges = {
+{0x21, 0x23}, {0x25, 0x2A}, {0x2C, 0x2F}, {0x3A, 0x3B}, {0x3F, 0x40}, {0x5B, 0x5D}, {0x5F, 0x5F}, {0x7B, 0x7B}, {0x7D, 0x7D}, {0xA1, 0xA1}, {0xA7, 0xA7}, {0xAB, 0xAB}, {0xB6, 0xB7}, {0xBB, 0xBB},
+{0xBF, 0xBF}, {0x37E, 0x37E}, {0x387, 0x387}, {0x55A, 0x55F}, {0x589, 0x58A}, {0x5BE, 0x5BE}, {0x5C0, 0x5C0}, {0x5C3, 0x5C3}, {0x5C6, 0x5C6}, {0x5F3, 0x5F4}, {0x609, 0x60A}, {0x60C, 0x60D},
+{0x61B, 0x61B}, {0x61E, 0x61F}, {0x66A, 0x66D}, {0x6D4, 0x6D4}, {0x700, 0x70D}, {0x7F7, 0x7F9}, {0x830, 0x83E}, {0x85E, 0x85E}, {0x964, 0x965}, {0x970, 0x970}, {0x9FD, 0x9FD}, {0xA76, 0xA76},
+{0xAF0, 0xAF0}, {0xC77, 0xC77}, {0xC84, 0xC84}, {0xDF4, 0xDF4}, {0xE4F, 0xE4F}, {0xE5A, 0xE5B}, {0xF04, 0xF12}, {0xF14, 0xF14}, {0xF3A, 0xF3D}, {0xF85, 0xF85}, {0xFD0, 0xFD4}, {0xFD9, 0xFDA},
+{0x104A, 0x104F}, {0x10FB, 0x10FB}, {0x1360, 0x1368}, {0x1400, 0x1400}, {0x166E, 0x166E}, {0x169B, 0x169C}, {0x16EB, 0x16ED}, {0x1735, 0x1736}, {0x17D4, 0x17D6}, {0x17D8, 0x17DA}, {0x1800, 0x180A},
+{0x1944, 0x1945}, {0x1A1E, 0x1A1F}, {0x1AA0, 0x1AA6}, {0x1AA8, 0x1AAD}, {0x1B5A, 0x1B60}, {0x1BFC, 0x1BFF}, {0x1C3B, 0x1C3F}, {0x1C7E, 0x1C7F}, {0x1CC0, 0x1CC7}, {0x1CD3, 0x1CD3}, {0x2010, 0x2027},
+{0x2030, 0x2043}, {0x2045, 0x2051}, {0x2053, 0x205E}, {0x207D, 0x207E}, {0x208D, 0x208E}, {0x2308, 0x230B}, {0x2329, 0x232A}, {0x2768, 0x2775}, {0x27C5, 0x27C6}, {0x27E6, 0x27EF}, {0x2983, 0x2998},
+{0x29D8, 0x29DB}, {0x29FC, 0x29FD}, {0x2CF9, 0x2CFC}, {0x2CFE, 0x2CFF}, {0x2D70, 0x2D70}, {0x2E00, 0x2E2E}, {0x2E30, 0x2E4F}, {0x2E52, 0x2E52}, {0x3001, 0x3003}, {0x3008, 0x3011}, {0x3014, 0x301F},
+{0x3030, 0x3030}, {0x303D, 0x303D}, {0x30A0, 0x30A0}, {0x30FB, 0x30FB}, {0xA4FE, 0xA4FF}, {0xA60D, 0xA60F}, {0xA673, 0xA673}, {0xA67E, 0xA67E}, {0xA6F2, 0xA6F7}, {0xA874, 0xA877}, {0xA8CE, 0xA8CF},
+{0xA8F8, 0xA8FA}, {0xA8FC, 0xA8FC}, {0xA92E, 0xA92F}, {0xA95F, 0xA95F}, {0xA9C1, 0xA9CD}, {0xA9DE, 0xA9DF}, {0xAA5C, 0xAA5F}, {0xAADE, 0xAADF}, {0xAAF0, 0xAAF1}, {0xABEB, 0xABEB}, {0xFD3E, 0xFD3F},
+{0xFE10, 0xFE19}, {0xFE30, 0xFE52}, {0xFE54, 0xFE61}, {0xFE63, 0xFE63}, {0xFE68, 0xFE68}, {0xFE6A, 0xFE6B}, {0xFF01, 0xFF03}, {0xFF05, 0xFF0A}, {0xFF0C, 0xFF0F}, {0xFF1A, 0xFF1B}, {0xFF1F, 0xFF20},
+{0xFF3B, 0xFF3D}, {0xFF3F, 0xFF3F}, {0xFF5B, 0xFF5B}, {0xFF5D, 0xFF5D}, {0xFF5F, 0xFF65}, {0x10100, 0x10102}, {0x1039F, 0x1039F}, {0x103D0, 0x103D0}, {0x1056F, 0x1056F}, {0x10857, 0x10857},
+{0x1091F, 0x1091F}, {0x1093F, 0x1093F}, {0x10A50, 0x10A58}, {0x10A7F, 0x10A7F}, {0x10AF0, 0x10AF6}, {0x10B39, 0x10B3F}, {0x10B99, 0x10B9C}, {0x10EAD, 0x10EAD}, {0x10F55, 0x10F59}, {0x11047, 0x1104D},
+{0x110BB, 0x110BC}, {0x110BE, 0x110C1}, {0x11140, 0x11143}, {0x11174, 0x11175}, {0x111C5, 0x111C8}, {0x111CD, 0x111CD}, {0x111DB, 0x111DB}, {0x111DD, 0x111DF}, {0x11238, 0x1123D}, {0x112A9, 0x112A9},
+{0x1144B, 0x1144F}, {0x1145A, 0x1145B}, {0x1145D, 0x1145D}, {0x114C6, 0x114C6}, {0x115C1, 0x115D7}, {0x11641, 0x11643}, {0x11660, 0x1166C}, {0x1173C, 0x1173E}, {0x1183B, 0x1183B}, {0x11944, 0x11946},
+{0x119E2, 0x119E2}, {0x11A3F, 0x11A46}, {0x11A9A, 0x11A9C}, {0x11A9E, 0x11AA2}, {0x11C41, 0x11C45}, {0x11C70, 0x11C71}, {0x11EF7, 0x11EF8}, {0x11FFF, 0x11FFF}, {0x12470, 0x12474}, {0x16A6E, 0x16A6F},
+{0x16AF5, 0x16AF5}, {0x16B37, 0x16B3B}, {0x16B44, 0x16B44}, {0x16E97, 0x16E9A}, {0x16FE2, 0x16FE2}, {0x1BC9F, 0x1BC9F}, {0x1DA87, 0x1DA8B}, {0x1E95E, 0x1E95F},
+};
+
+static const std::vector<std::pair<uint32_t, uint32_t>> symbol_ranges = {
+{0x24, 0x24}, {0x2B, 0x2B}, {0x3C, 0x3E}, {0x5E, 0x5E}, {0x60, 0x60}, {0x7C, 0x7C}, {0x7E, 0x7E}, {0xA2, 0xA6}, {0xA8, 0xA9}, {0xAC, 0xAC}, {0xAE, 0xB1}, {0xB4, 0xB4}, {0xB8, 0xB8}, {0xD7, 0xD7},
+{0xF7, 0xF7}, {0x2C2, 0x2C5}, {0x2D2, 0x2DF}, {0x2E5, 0x2EB}, {0x2ED, 0x2ED}, {0x2EF, 0x2FF}, {0x375, 0x375}, {0x384, 0x385}, {0x3F6, 0x3F6}, {0x482, 0x482}, {0x58D, 0x58F}, {0x606, 0x608},
+{0x60B, 0x60B}, {0x60E, 0x60F}, {0x6DE, 0x6DE}, {0x6E9, 0x6E9}, {0x6FD, 0x6FE}, {0x7F6, 0x7F6}, {0x7FE, 0x7FF}, {0x9F2, 0x9F3}, {0x9FA, 0x9FB}, {0xAF1, 0xAF1}, {0xB70, 0xB70}, {0xBF3, 0xBFA},
+{0xC7F, 0xC7F}, {0xD4F, 0xD4F}, {0xD79, 0xD79}, {0xE3F, 0xE3F}, {0xF01, 0xF03}, {0xF13, 0xF13}, {0xF15, 0xF17}, {0xF1A, 0xF1F}, {0xF34, 0xF34}, {0xF36, 0xF36}, {0xF38, 0xF38}, {0xFBE, 0xFC5},
+{0xFC7, 0xFCC}, {0xFCE, 0xFCF}, {0xFD5, 0xFD8}, {0x109E, 0x109F}, {0x1390, 0x1399}, {0x166D, 0x166D}, {0x17DB, 0x17DB}, {0x1940, 0x1940}, {0x19DE, 0x19FF}, {0x1B61, 0x1B6A}, {0x1B74, 0x1B7C},
+{0x1FBD, 0x1FBD}, {0x1FBF, 0x1FC1}, {0x1FCD, 0x1FCF}, {0x1FDD, 0x1FDF}, {0x1FED, 0x1FEF}, {0x1FFD, 0x1FFE}, {0x2044, 0x2044}, {0x2052, 0x2052}, {0x207A, 0x207C}, {0x208A, 0x208C}, {0x20A0, 0x20BF},
+{0x2100, 0x2101}, {0x2103, 0x2106}, {0x2108, 0x2109}, {0x2114, 0x2114}, {0x2116, 0x2118}, {0x211E, 0x2123}, {0x2125, 0x2125}, {0x2127, 0x2127}, {0x2129, 0x2129}, {0x212E, 0x212E}, {0x213A, 0x213B},
+{0x2140, 0x2144}, {0x214A, 0x214D}, {0x214F, 0x214F}, {0x218A, 0x218B}, {0x2190, 0x2307}, {0x230C, 0x2328}, {0x232B, 0x2426}, {0x2440, 0x244A}, {0x249C, 0x24E9}, {0x2500, 0x2767}, {0x2794, 0x27C4},
+{0x27C7, 0x27E5}, {0x27F0, 0x2982}, {0x2999, 0x29D7}, {0x29DC, 0x29FB}, {0x29FE, 0x2B73}, {0x2B76, 0x2B95}, {0x2B97, 0x2BFF}, {0x2CE5, 0x2CEA}, {0x2E50, 0x2E51}, {0x2E80, 0x2E99}, {0x2E9B, 0x2EF3},
+{0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, {0x3004, 0x3004}, {0x3012, 0x3013}, {0x3020, 0x3020}, {0x3036, 0x3037}, {0x303E, 0x303F}, {0x309B, 0x309C}, {0x3190, 0x3191}, {0x3196, 0x319F}, {0x31C0, 0x31E3},
+{0x3200, 0x321E}, {0x322A, 0x3247}, {0x3250, 0x3250}, {0x3260, 0x327F}, {0x328A, 0x32B0}, {0x32C0, 0x33FF}, {0x4DC0, 0x4DFF}, {0xA490, 0xA4C6}, {0xA700, 0xA716}, {0xA720, 0xA721}, {0xA789, 0xA78A},
+{0xA828, 0xA82B}, {0xA836, 0xA839}, {0xAA77, 0xAA79}, {0xAB5B, 0xAB5B}, {0xAB6A, 0xAB6B}, {0xFB29, 0xFB29}, {0xFBB2, 0xFBC1}, {0xFDFC, 0xFDFD}, {0xFE62, 0xFE62}, {0xFE64, 0xFE66}, {0xFE69, 0xFE69},
+{0xFF04, 0xFF04}, {0xFF0B, 0xFF0B}, {0xFF1C, 0xFF1E}, {0xFF3E, 0xFF3E}, {0xFF40, 0xFF40}, {0xFF5C, 0xFF5C}, {0xFF5E, 0xFF5E}, {0xFFE0, 0xFFE6}, {0xFFE8, 0xFFEE}, {0xFFFC, 0xFFFD}, {0x10137, 0x1013F},
+{0x10179, 0x10189}, {0x1018C, 0x1018E}, {0x10190, 0x1019C}, {0x101A0, 0x101A0}, {0x101D0, 0x101FC}, {0x10877, 0x10878}, {0x10AC8, 0x10AC8}, {0x1173F, 0x1173F}, {0x11FD5, 0x11FF1}, {0x16B3C, 0x16B3F},
+{0x16B45, 0x16B45}, {0x1BC9C, 0x1BC9C}, {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D164}, {0x1D16A, 0x1D16C}, {0x1D183, 0x1D184}, {0x1D18C, 0x1D1A9}, {0x1D1AE, 0x1D1E8}, {0x1D200, 0x1D241},
+{0x1D245, 0x1D245}, {0x1D300, 0x1D356}, {0x1D6C1, 0x1D6C1}, {0x1D6DB, 0x1D6DB}, {0x1D6FB, 0x1D6FB}, {0x1D715, 0x1D715}, {0x1D735, 0x1D735}, {0x1D74F, 0x1D74F}, {0x1D76F, 0x1D76F}, {0x1D789, 0x1D789},
+{0x1D7A9, 0x1D7A9}, {0x1D7C3, 0x1D7C3}, {0x1D800, 0x1D9FF}, {0x1DA37, 0x1DA3A}, {0x1DA6D, 0x1DA74}, {0x1DA76, 0x1DA83}, {0x1DA85, 0x1DA86}, {0x1E14F, 0x1E14F}, {0x1E2FF, 0x1E2FF}, {0x1ECAC, 0x1ECAC},
+{0x1ECB0, 0x1ECB0}, {0x1ED2E, 0x1ED2E}, {0x1EEF0, 0x1EEF1}, {0x1F000, 0x1F02B}, {0x1F030, 0x1F093}, {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, {0x1F0C1, 0x1F0CF}, {0x1F0D1, 0x1F0F5}, {0x1F10D, 0x1F1AD},
+{0x1F1E6, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, {0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F6D7}, {0x1F6E0, 0x1F6EC}, {0x1F6F0, 0x1F6FC}, {0x1F700, 0x1F773}, {0x1F780, 0x1F7D8},
+{0x1F7E0, 0x1F7EB}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, {0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, {0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F978}, {0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1FA53},
+{0x1FA60, 0x1FA6D}, {0x1FA70, 0x1FA74}, {0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8}, {0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 0x1FAD6}, {0x1FB00, 0x1FB92}, {0x1FB94, 0x1FBCA},
+};
+
+static const std::vector<std::pair<uint32_t, uint32_t>> control_ranges = {
+{0x0, 0x8}, {0xE, 0x1B}, {0x7F, 0x84}, {0x86, 0x9F}, {0xAD, 0xAD}, {0x378, 0x379}, {0x380, 0x383}, {0x38B, 0x38B}, {0x38D, 0x38D}, {0x3A2, 0x3A2}, {0x530, 0x530}, {0x557, 0x558}, {0x58B, 0x58C},
+{0x590, 0x590}, {0x5C8, 0x5CF}, {0x5EB, 0x5EE}, {0x5F5, 0x605}, {0x61C, 0x61D}, {0x6DD, 0x6DD}, {0x70E, 0x70F}, {0x74B, 0x74C}, {0x7B2, 0x7BF}, {0x7FB, 0x7FC}, {0x82E, 0x82F}, {0x83F, 0x83F},
+{0x85C, 0x85D}, {0x85F, 0x85F}, {0x86B, 0x89F}, {0x8B5, 0x8B5}, {0x8C8, 0x8D2}, {0x8E2, 0x8E2}, {0x984, 0x984}, {0x98D, 0x98E}, {0x991, 0x992}, {0x9A9, 0x9A9}, {0x9B1, 0x9B1}, {0x9B3, 0x9B5},
+{0x9BA, 0x9BB}, {0x9C5, 0x9C6}, {0x9C9, 0x9CA}, {0x9CF, 0x9D6}, {0x9D8, 0x9DB}, {0x9DE, 0x9DE}, {0x9E4, 0x9E5}, {0x9FF, 0xA00}, {0xA04, 0xA04}, {0xA0B, 0xA0E}, {0xA11, 0xA12}, {0xA29, 0xA29},
+{0xA31, 0xA31}, {0xA34, 0xA34}, {0xA37, 0xA37}, {0xA3A, 0xA3B}, {0xA3D, 0xA3D}, {0xA43, 0xA46}, {0xA49, 0xA4A}, {0xA4E, 0xA50}, {0xA52, 0xA58}, {0xA5D, 0xA5D}, {0xA5F, 0xA65}, {0xA77, 0xA80},
+{0xA84, 0xA84}, {0xA8E, 0xA8E}, {0xA92, 0xA92}, {0xAA9, 0xAA9}, {0xAB1, 0xAB1}, {0xAB4, 0xAB4}, {0xABA, 0xABB}, {0xAC6, 0xAC6}, {0xACA, 0xACA}, {0xACE, 0xACF}, {0xAD1, 0xADF}, {0xAE4, 0xAE5},
+{0xAF2, 0xAF8}, {0xB00, 0xB00}, {0xB04, 0xB04}, {0xB0D, 0xB0E}, {0xB11, 0xB12}, {0xB29, 0xB29}, {0xB31, 0xB31}, {0xB34, 0xB34}, {0xB3A, 0xB3B}, {0xB45, 0xB46}, {0xB49, 0xB4A}, {0xB4E, 0xB54},
+{0xB58, 0xB5B}, {0xB5E, 0xB5E}, {0xB64, 0xB65}, {0xB78, 0xB81}, {0xB84, 0xB84}, {0xB8B, 0xB8D}, {0xB91, 0xB91}, {0xB96, 0xB98}, {0xB9B, 0xB9B}, {0xB9D, 0xB9D}, {0xBA0, 0xBA2}, {0xBA5, 0xBA7},
+{0xBAB, 0xBAD}, {0xBBA, 0xBBD}, {0xBC3, 0xBC5}, {0xBC9, 0xBC9}, {0xBCE, 0xBCF}, {0xBD1, 0xBD6}, {0xBD8, 0xBE5}, {0xBFB, 0xBFF}, {0xC0D, 0xC0D}, {0xC11, 0xC11}, {0xC29, 0xC29}, {0xC3A, 0xC3C},
+{0xC45, 0xC45}, {0xC49, 0xC49}, {0xC4E, 0xC54}, {0xC57, 0xC57}, {0xC5B, 0xC5F}, {0xC64, 0xC65}, {0xC70, 0xC76}, {0xC8D, 0xC8D}, {0xC91, 0xC91}, {0xCA9, 0xCA9}, {0xCB4, 0xCB4}, {0xCBA, 0xCBB},
+{0xCC5, 0xCC5}, {0xCC9, 0xCC9}, {0xCCE, 0xCD4}, {0xCD7, 0xCDD}, {0xCDF, 0xCDF}, {0xCE4, 0xCE5}, {0xCF0, 0xCF0}, {0xCF3, 0xCFF}, {0xD0D, 0xD0D}, {0xD11, 0xD11}, {0xD45, 0xD45}, {0xD49, 0xD49},
+{0xD50, 0xD53}, {0xD64, 0xD65}, {0xD80, 0xD80}, {0xD84, 0xD84}, {0xD97, 0xD99}, {0xDB2, 0xDB2}, {0xDBC, 0xDBC}, {0xDBE, 0xDBF}, {0xDC7, 0xDC9}, {0xDCB, 0xDCE}, {0xDD5, 0xDD5}, {0xDD7, 0xDD7},
+{0xDE0, 0xDE5}, {0xDF0, 0xDF1}, {0xDF5, 0xE00}, {0xE3B, 0xE3E}, {0xE5C, 0xE80}, {0xE83, 0xE83}, {0xE85, 0xE85}, {0xE8B, 0xE8B}, {0xEA4, 0xEA4}, {0xEA6, 0xEA6}, {0xEBE, 0xEBF}, {0xEC5, 0xEC5},
+{0xEC7, 0xEC7}, {0xECE, 0xECF}, {0xEDA, 0xEDB}, {0xEE0, 0xEFF}, {0xF48, 0xF48}, {0xF6D, 0xF70}, {0xF98, 0xF98}, {0xFBD, 0xFBD}, {0xFCD, 0xFCD}, {0xFDB, 0xFFF}, {0x10C6, 0x10C6}, {0x10C8, 0x10CC},
+{0x10CE, 0x10CF}, {0x1249, 0x1249}, {0x124E, 0x124F}, {0x1257, 0x1257}, {0x1259, 0x1259}, {0x125E, 0x125F}, {0x1289, 0x1289}, {0x128E, 0x128F}, {0x12B1, 0x12B1}, {0x12B6, 0x12B7}, {0x12BF, 0x12BF},
+{0x12C1, 0x12C1}, {0x12C6, 0x12C7}, {0x12D7, 0x12D7}, {0x1311, 0x1311}, {0x1316, 0x1317}, {0x135B, 0x135C}, {0x137D, 0x137F}, {0x139A, 0x139F}, {0x13F6, 0x13F7}, {0x13FE, 0x13FF}, {0x169D, 0x169F},
+{0x16F9, 0x16FF}, {0x170D, 0x170D}, {0x1715, 0x171F}, {0x1737, 0x173F}, {0x1754, 0x175F}, {0x176D, 0x176D}, {0x1771, 0x1771}, {0x1774, 0x177F}, {0x17DE, 0x17DF}, {0x17EA, 0x17EF}, {0x17FA, 0x17FF},
+{0x180E, 0x180F}, {0x181A, 0x181F}, {0x1879, 0x187F}, {0x18AB, 0x18AF}, {0x18F6, 0x18FF}, {0x191F, 0x191F}, {0x192C, 0x192F}, {0x193C, 0x193F}, {0x1941, 0x1943}, {0x196E, 0x196F}, {0x1975, 0x197F},
+{0x19AC, 0x19AF}, {0x19CA, 0x19CF}, {0x19DB, 0x19DD}, {0x1A1C, 0x1A1D}, {0x1A5F, 0x1A5F}, {0x1A7D, 0x1A7E}, {0x1A8A, 0x1A8F}, {0x1A9A, 0x1A9F}, {0x1AAE, 0x1AAF}, {0x1AC1, 0x1AFF}, {0x1B4C, 0x1B4F},
+{0x1B7D, 0x1B7F}, {0x1BF4, 0x1BFB}, {0x1C38, 0x1C3A}, {0x1C4A, 0x1C4C}, {0x1C89, 0x1C8F}, {0x1CBB, 0x1CBC}, {0x1CC8, 0x1CCF}, {0x1CFB, 0x1CFF}, {0x1DFA, 0x1DFA}, {0x1F16, 0x1F17}, {0x1F1E, 0x1F1F},
+{0x1F46, 0x1F47}, {0x1F4E, 0x1F4F}, {0x1F58, 0x1F58}, {0x1F5A, 0x1F5A}, {0x1F5C, 0x1F5C}, {0x1F5E, 0x1F5E}, {0x1F7E, 0x1F7F}, {0x1FB5, 0x1FB5}, {0x1FC5, 0x1FC5}, {0x1FD4, 0x1FD5}, {0x1FDC, 0x1FDC},
+{0x1FF0, 0x1FF1}, {0x1FF5, 0x1FF5}, {0x1FFF, 0x1FFF}, {0x200B, 0x200F}, {0x202A, 0x202E}, {0x2060, 0x206F}, {0x2072, 0x2073}, {0x208F, 0x208F}, {0x209D, 0x209F}, {0x20C0, 0x20CF}, {0x20F1, 0x20FF},
+{0x218C, 0x218F}, {0x2427, 0x243F}, {0x244B, 0x245F}, {0x2B74, 0x2B75}, {0x2B96, 0x2B96}, {0x2C2F, 0x2C2F}, {0x2C5F, 0x2C5F}, {0x2CF4, 0x2CF8}, {0x2D26, 0x2D26}, {0x2D28, 0x2D2C}, {0x2D2E, 0x2D2F},
+{0x2D68, 0x2D6E}, {0x2D71, 0x2D7E}, {0x2D97, 0x2D9F}, {0x2DA7, 0x2DA7}, {0x2DAF, 0x2DAF}, {0x2DB7, 0x2DB7}, {0x2DBF, 0x2DBF}, {0x2DC7, 0x2DC7}, {0x2DCF, 0x2DCF}, {0x2DD7, 0x2DD7}, {0x2DDF, 0x2DDF},
+{0x2E53, 0x2E7F}, {0x2E9A, 0x2E9A}, {0x2EF4, 0x2EFF}, {0x2FD6, 0x2FEF}, {0x2FFC, 0x2FFF}, {0x3040, 0x3040}, {0x3097, 0x3098}, {0x3100, 0x3104}, {0x3130, 0x3130}, {0x318F, 0x318F}, {0x31E4, 0x31EF},
+{0x321F, 0x321F}, {0x9FFD, 0x9FFF}, {0xA48D, 0xA48F}, {0xA4C7, 0xA4CF}, {0xA62C, 0xA63F}, {0xA6F8, 0xA6FF}, {0xA7C0, 0xA7C1}, {0xA7CB, 0xA7F4}, {0xA82D, 0xA82F}, {0xA83A, 0xA83F}, {0xA878, 0xA87F},
+{0xA8C6, 0xA8CD}, {0xA8DA, 0xA8DF}, {0xA954, 0xA95E}, {0xA97D, 0xA97F}, {0xA9CE, 0xA9CE}, {0xA9DA, 0xA9DD}, {0xA9FF, 0xA9FF}, {0xAA37, 0xAA3F}, {0xAA4E, 0xAA4F}, {0xAA5A, 0xAA5B}, {0xAAC3, 0xAADA},
+{0xAAF7, 0xAB00}, {0xAB07, 0xAB08}, {0xAB0F, 0xAB10}, {0xAB17, 0xAB1F}, {0xAB27, 0xAB27}, {0xAB2F, 0xAB2F}, {0xAB6C, 0xAB6F}, {0xABEE, 0xABEF}, {0xABFA, 0xABFF}, {0xD7A4, 0xD7AF}, {0xD7C7, 0xD7CA},
+{0xD7FC, 0xF8FF}, {0xFA6E, 0xFA6F}, {0xFADA, 0xFAFF}, {0xFB07, 0xFB12}, {0xFB18, 0xFB1C}, {0xFB37, 0xFB37}, {0xFB3D, 0xFB3D}, {0xFB3F, 0xFB3F}, {0xFB42, 0xFB42}, {0xFB45, 0xFB45}, {0xFBC2, 0xFBD2},
+{0xFD40, 0xFD4F}, {0xFD90, 0xFD91}, {0xFDC8, 0xFDEF}, {0xFDFE, 0xFDFF}, {0xFE1A, 0xFE1F}, {0xFE53, 0xFE53}, {0xFE67, 0xFE67}, {0xFE6C, 0xFE6F}, {0xFE75, 0xFE75}, {0xFEFD, 0xFF00}, {0xFFBF, 0xFFC1},
+{0xFFC8, 0xFFC9}, {0xFFD0, 0xFFD1}, {0xFFD8, 0xFFD9}, {0xFFDD, 0xFFDF}, {0xFFE7, 0xFFE7}, {0xFFEF, 0xFFFB}, {0xFFFE, 0xFFFF}, {0x1000C, 0x1000C}, {0x10027, 0x10027}, {0x1003B, 0x1003B},
+{0x1003E, 0x1003E}, {0x1004E, 0x1004F}, {0x1005E, 0x1007F}, {0x100FB, 0x100FF}, {0x10103, 0x10106}, {0x10134, 0x10136}, {0x1018F, 0x1018F}, {0x1019D, 0x1019F}, {0x101A1, 0x101CF}, {0x101FE, 0x1027F},
+{0x1029D, 0x1029F}, {0x102D1, 0x102DF}, {0x102FC, 0x102FF}, {0x10324, 0x1032C}, {0x1034B, 0x1034F}, {0x1037B, 0x1037F}, {0x1039E, 0x1039E}, {0x103C4, 0x103C7}, {0x103D6, 0x103FF}, {0x1049E, 0x1049F},
+{0x104AA, 0x104AF}, {0x104D4, 0x104D7}, {0x104FC, 0x104FF}, {0x10528, 0x1052F}, {0x10564, 0x1056E}, {0x10570, 0x105FF}, {0x10737, 0x1073F}, {0x10756, 0x1075F}, {0x10768, 0x107FF}, {0x10806, 0x10807},
+{0x10809, 0x10809}, {0x10836, 0x10836}, {0x10839, 0x1083B}, {0x1083D, 0x1083E}, {0x10856, 0x10856}, {0x1089F, 0x108A6}, {0x108B0, 0x108DF}, {0x108F3, 0x108F3}, {0x108F6, 0x108FA}, {0x1091C, 0x1091E},
+{0x1093A, 0x1093E}, {0x10940, 0x1097F}, {0x109B8, 0x109BB}, {0x109D0, 0x109D1}, {0x10A04, 0x10A04}, {0x10A07, 0x10A0B}, {0x10A14, 0x10A14}, {0x10A18, 0x10A18}, {0x10A36, 0x10A37}, {0x10A3B, 0x10A3E},
+{0x10A49, 0x10A4F}, {0x10A59, 0x10A5F}, {0x10AA0, 0x10ABF}, {0x10AE7, 0x10AEA}, {0x10AF7, 0x10AFF}, {0x10B36, 0x10B38}, {0x10B56, 0x10B57}, {0x10B73, 0x10B77}, {0x10B92, 0x10B98}, {0x10B9D, 0x10BA8},
+{0x10BB0, 0x10BFF}, {0x10C49, 0x10C7F}, {0x10CB3, 0x10CBF}, {0x10CF3, 0x10CF9}, {0x10D28, 0x10D2F}, {0x10D3A, 0x10E5F}, {0x10E7F, 0x10E7F}, {0x10EAA, 0x10EAA}, {0x10EAE, 0x10EAF}, {0x10EB2, 0x10EFF},
+{0x10F28, 0x10F2F}, {0x10F5A, 0x10FAF}, {0x10FCC, 0x10FDF}, {0x10FF7, 0x10FFF}, {0x1104E, 0x11051}, {0x11070, 0x1107E}, {0x110BD, 0x110BD}, {0x110C2, 0x110CF}, {0x110E9, 0x110EF}, {0x110FA, 0x110FF},
+{0x11135, 0x11135}, {0x11148, 0x1114F}, {0x11177, 0x1117F}, {0x111E0, 0x111E0}, {0x111F5, 0x111FF}, {0x11212, 0x11212}, {0x1123F, 0x1127F}, {0x11287, 0x11287}, {0x11289, 0x11289}, {0x1128E, 0x1128E},
+{0x1129E, 0x1129E}, {0x112AA, 0x112AF}, {0x112EB, 0x112EF}, {0x112FA, 0x112FF}, {0x11304, 0x11304}, {0x1130D, 0x1130E}, {0x11311, 0x11312}, {0x11329, 0x11329}, {0x11331, 0x11331}, {0x11334, 0x11334},
+{0x1133A, 0x1133A}, {0x11345, 0x11346}, {0x11349, 0x1134A}, {0x1134E, 0x1134F}, {0x11351, 0x11356}, {0x11358, 0x1135C}, {0x11364, 0x11365}, {0x1136D, 0x1136F}, {0x11375, 0x113FF}, {0x1145C, 0x1145C},
+{0x11462, 0x1147F}, {0x114C8, 0x114CF}, {0x114DA, 0x1157F}, {0x115B6, 0x115B7}, {0x115DE, 0x115FF}, {0x11645, 0x1164F}, {0x1165A, 0x1165F}, {0x1166D, 0x1167F}, {0x116B9, 0x116BF}, {0x116CA, 0x116FF},
+{0x1171B, 0x1171C}, {0x1172C, 0x1172F}, {0x11740, 0x117FF}, {0x1183C, 0x1189F}, {0x118F3, 0x118FE}, {0x11907, 0x11908}, {0x1190A, 0x1190B}, {0x11914, 0x11914}, {0x11917, 0x11917}, {0x11936, 0x11936},
+{0x11939, 0x1193A}, {0x11947, 0x1194F}, {0x1195A, 0x1199F}, {0x119A8, 0x119A9}, {0x119D8, 0x119D9}, {0x119E5, 0x119FF}, {0x11A48, 0x11A4F}, {0x11AA3, 0x11ABF}, {0x11AF9, 0x11BFF}, {0x11C09, 0x11C09},
+{0x11C37, 0x11C37}, {0x11C46, 0x11C4F}, {0x11C6D, 0x11C6F}, {0x11C90, 0x11C91}, {0x11CA8, 0x11CA8}, {0x11CB7, 0x11CFF}, {0x11D07, 0x11D07}, {0x11D0A, 0x11D0A}, {0x11D37, 0x11D39}, {0x11D3B, 0x11D3B},
+{0x11D3E, 0x11D3E}, {0x11D48, 0x11D4F}, {0x11D5A, 0x11D5F}, {0x11D66, 0x11D66}, {0x11D69, 0x11D69}, {0x11D8F, 0x11D8F}, {0x11D92, 0x11D92}, {0x11D99, 0x11D9F}, {0x11DAA, 0x11EDF}, {0x11EF9, 0x11FAF},
+{0x11FB1, 0x11FBF}, {0x11FF2, 0x11FFE}, {0x1239A, 0x123FF}, {0x1246F, 0x1246F}, {0x12475, 0x1247F}, {0x12544, 0x12FFF}, {0x1342F, 0x143FF}, {0x14647, 0x167FF}, {0x16A39, 0x16A3F}, {0x16A5F, 0x16A5F},
+{0x16A6A, 0x16A6D}, {0x16A70, 0x16ACF}, {0x16AEE, 0x16AEF}, {0x16AF6, 0x16AFF}, {0x16B46, 0x16B4F}, {0x16B5A, 0x16B5A}, {0x16B62, 0x16B62}, {0x16B78, 0x16B7C}, {0x16B90, 0x16E3F}, {0x16E9B, 0x16EFF},
+{0x16F4B, 0x16F4E}, {0x16F88, 0x16F8E}, {0x16FA0, 0x16FDF}, {0x16FE5, 0x16FEF}, {0x16FF2, 0x16FFF}, {0x187F8, 0x187FF}, {0x18CD6, 0x18CFF}, {0x18D09, 0x1AFFF}, {0x1B11F, 0x1B14F}, {0x1B153, 0x1B163},
+{0x1B168, 0x1B16F}, {0x1B2FC, 0x1BBFF}, {0x1BC6B, 0x1BC6F}, {0x1BC7D, 0x1BC7F}, {0x1BC89, 0x1BC8F}, {0x1BC9A, 0x1BC9B}, {0x1BCA0, 0x1CFFF}, {0x1D0F6, 0x1D0FF}, {0x1D127, 0x1D128}, {0x1D173, 0x1D17A},
+{0x1D1E9, 0x1D1FF}, {0x1D246, 0x1D2DF}, {0x1D2F4, 0x1D2FF}, {0x1D357, 0x1D35F}, {0x1D379, 0x1D3FF}, {0x1D455, 0x1D455}, {0x1D49D, 0x1D49D}, {0x1D4A0, 0x1D4A1}, {0x1D4A3, 0x1D4A4}, {0x1D4A7, 0x1D4A8},
+{0x1D4AD, 0x1D4AD}, {0x1D4BA, 0x1D4BA}, {0x1D4BC, 0x1D4BC}, {0x1D4C4, 0x1D4C4}, {0x1D506, 0x1D506}, {0x1D50B, 0x1D50C}, {0x1D515, 0x1D515}, {0x1D51D, 0x1D51D}, {0x1D53A, 0x1D53A}, {0x1D53F, 0x1D53F},
+{0x1D545, 0x1D545}, {0x1D547, 0x1D549}, {0x1D551, 0x1D551}, {0x1D6A6, 0x1D6A7}, {0x1D7CC, 0x1D7CD}, {0x1DA8C, 0x1DA9A}, {0x1DAA0, 0x1DAA0}, {0x1DAB0, 0x1DFFF}, {0x1E007, 0x1E007}, {0x1E019, 0x1E01A},
+{0x1E022, 0x1E022}, {0x1E025, 0x1E025}, {0x1E02B, 0x1E0FF}, {0x1E12D, 0x1E12F}, {0x1E13E, 0x1E13F}, {0x1E14A, 0x1E14D}, {0x1E150, 0x1E2BF}, {0x1E2FA, 0x1E2FE}, {0x1E300, 0x1E7FF}, {0x1E8C5, 0x1E8C6},
+{0x1E8D7, 0x1E8FF}, {0x1E94C, 0x1E94F}, {0x1E95A, 0x1E95D}, {0x1E960, 0x1EC70}, {0x1ECB5, 0x1ED00}, {0x1ED3E, 0x1EDFF}, {0x1EE04, 0x1EE04}, {0x1EE20, 0x1EE20}, {0x1EE23, 0x1EE23}, {0x1EE25, 0x1EE26},
+{0x1EE28, 0x1EE28}, {0x1EE33, 0x1EE33}, {0x1EE38, 0x1EE38}, {0x1EE3A, 0x1EE3A}, {0x1EE3C, 0x1EE41}, {0x1EE43, 0x1EE46}, {0x1EE48, 0x1EE48}, {0x1EE4A, 0x1EE4A}, {0x1EE4C, 0x1EE4C}, {0x1EE50, 0x1EE50},
+{0x1EE53, 0x1EE53}, {0x1EE55, 0x1EE56}, {0x1EE58, 0x1EE58}, {0x1EE5A, 0x1EE5A}, {0x1EE5C, 0x1EE5C}, {0x1EE5E, 0x1EE5E}, {0x1EE60, 0x1EE60}, {0x1EE63, 0x1EE63}, {0x1EE65, 0x1EE66}, {0x1EE6B, 0x1EE6B},
+{0x1EE73, 0x1EE73}, {0x1EE78, 0x1EE78}, {0x1EE7D, 0x1EE7D}, {0x1EE7F, 0x1EE7F}, {0x1EE8A, 0x1EE8A}, {0x1EE9C, 0x1EEA0}, {0x1EEA4, 0x1EEA4}, {0x1EEAA, 0x1EEAA}, {0x1EEBC, 0x1EEEF}, {0x1EEF2, 0x1EFFF},
+{0x1F02C, 0x1F02F}, {0x1F094, 0x1F09F}, {0x1F0AF, 0x1F0B0}, {0x1F0C0, 0x1F0C0}, {0x1F0D0, 0x1F0D0}, {0x1F0F6, 0x1F0FF}, {0x1F1AE, 0x1F1E5}, {0x1F203, 0x1F20F}, {0x1F23C, 0x1F23F}, {0x1F249, 0x1F24F},
+{0x1F252, 0x1F25F}, {0x1F266, 0x1F2FF}, {0x1F6D8, 0x1F6DF}, {0x1F6ED, 0x1F6EF}, {0x1F6FD, 0x1F6FF}, {0x1F774, 0x1F77F}, {0x1F7D9, 0x1F7DF}, {0x1F7EC, 0x1F7FF}, {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F},
+{0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F8AF}, {0x1F8B2, 0x1F8FF}, {0x1F979, 0x1F979}, {0x1F9CC, 0x1F9CC}, {0x1FA54, 0x1FA5F}, {0x1FA6E, 0x1FA6F}, {0x1FA75, 0x1FA77}, {0x1FA7B, 0x1FA7F},
+{0x1FA87, 0x1FA8F}, {0x1FAA9, 0x1FAAF}, {0x1FAB7, 0x1FABF}, {0x1FAC3, 0x1FACF}, {0x1FAD7, 0x1FAFF}, {0x1FB93, 0x1FB93}, {0x1FBCB, 0x1FBEF}, {0x1FBFA, 0x1FFFF}, {0x2A6DE, 0x2A6FF}, {0x2B735, 0x2B73F},
+{0x2B81E, 0x2B81F}, {0x2CEA2, 0x2CEAF}, {0x2EBE1, 0x2F7FF}, {0x2FA1E, 0x2FFFF}, {0x3134B, 0xE00FF}, {0xE01F0, 0x10FFFF},
+};
+
+static std::string codepoint_to_utf8(uint32_t cp) {
+ std::string result;
+ if (/* 0x00 <= cp && */ cp <= 0x7f) {
+ result.push_back(cp);
+ }
+ else if (0x80 <= cp && cp <= 0x7ff) {
+ result.push_back(0xc0 | ((cp >> 6) & 0x1f));
+ result.push_back(0x80 | (cp & 0x3f));
+ }
+ else if (0x800 <= cp && cp <= 0xffff) {
+ result.push_back(0xe0 | ((cp >> 12) & 0x0f));
+ result.push_back(0x80 | ((cp >> 6) & 0x3f));
+ result.push_back(0x80 | (cp & 0x3f));
+ }
+ else if (0x10000 <= cp && cp <= 0x10ffff) {
+ result.push_back(0xf0 | ((cp >> 18) & 0x07));
+ result.push_back(0x80 | ((cp >> 12) & 0x3f));
+ result.push_back(0x80 | ((cp >> 6) & 0x3f));
+ result.push_back(0x80 | (cp & 0x3f));
+ }
+ else {
+ throw std::invalid_argument("invalid codepoint");
+ }
+ return result;
+}
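// Worked example (editorial, not part of the patch): the encoder above splits a
// codepoint's bits across lead and continuation bytes. For U+20AC (the euro
// sign), 0x800 <= 0x20AC <= 0xffff selects the three-byte branch:
//   0xe0 | ((0x20AC >> 12) & 0x0f) = 0xe2
//   0x80 | ((0x20AC >>  6) & 0x3f) = 0x82
//   0x80 | ( 0x20AC        & 0x3f) = 0xac
// so codepoint_to_utf8(0x20AC) returns "\xe2\x82\xac".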
+
+static std::string codepoints_to_utf8(const std::vector<uint32_t> & cps) {
+ std::string result;
+ for (size_t i = 0; i < cps.size(); ++i) {
+ result.append(codepoint_to_utf8(cps[i]));
+ }
+ return result;
+}
+
+static uint32_t codepoint_from_utf8(const std::string & utf8, size_t & offset) {
+ assert(offset < utf8.size());
+ if (!(utf8[offset + 0] & 0x80)) {
+ auto result = utf8[offset + 0];
+ offset += 1;
+ return result;
+ }
+ else if (!(utf8[offset + 0] & 0x40)) {
+ throw std::invalid_argument("invalid character");
+ }
+ else if (!(utf8[offset + 0] & 0x20)) {
+ if (offset + 1 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80))
+ throw std::invalid_argument("invalid character");
+ auto result = ((utf8[offset + 0] & 0x1f) << 6) | (utf8[offset + 1] & 0x3f);
+ offset += 2;
+ return result;
+ }
+ else if (!(utf8[offset + 0] & 0x10)) {
+ if (offset + 2 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80) || ! ((utf8[offset + 2] & 0xc0) == 0x80))
+ throw std::invalid_argument("invalid character");
+ auto result = ((utf8[offset + 0] & 0x0f) << 12) | ((utf8[offset + 1] & 0x3f) << 6) | (utf8[offset + 2] & 0x3f);
+ offset += 3;
+ return result;
+ }
+ else if (!(utf8[offset + 0] & 0x08)) {
+ if (offset + 3 >= utf8.size() || ! ((utf8[offset + 1] & 0xc0) == 0x80) || ! ((utf8[offset + 2] & 0xc0) == 0x80) || !((utf8[offset + 3] & 0xc0) == 0x80))
+ throw std::invalid_argument("invalid character");
+ auto result = ((utf8[offset + 0] & 0x07) << 18) | ((utf8[offset + 1] & 0x3f) << 12) | ((utf8[offset + 2] & 0x3f) << 6) | (utf8[offset + 3] & 0x3f);
+ offset += 4;
+ return result;
+ }
+ throw std::invalid_argument("invalid string");
+}
+
+static std::vector<uint32_t> codepoints_from_utf8(const std::string & utf8) {
+ std::vector<uint32_t> result;
+ size_t offset = 0;
+ while (offset < utf8.size()) {
+ result.push_back(codepoint_from_utf8(utf8, offset));
+ }
+ return result;
+}
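// A minimal round-trip check (editorial sketch; the test function name is
// hypothetical, and assert/std::vector/std::string are already used by this
// translation unit). Decoding the encoder's output must reproduce the input:
static void test_utf8_roundtrip() {
    const std::vector<uint32_t> cps = { 0x24, 0x20AC, 0x1F600 }; // '$', euro sign, emoji
    const std::string utf8 = codepoints_to_utf8(cps);
    assert(codepoints_from_utf8(utf8) == cps);
}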
+
+static std::vector<uint16_t> codepoint_to_utf16(uint32_t cp) {
+ std::vector<uint16_t> result;
+ if (/* 0x0000 <= cp && */ cp <= 0xffff) {
+ result.emplace_back(cp);
+ }
+ else if (0x10000 <= cp && cp <= 0x10ffff) {
+ result.emplace_back(0xd800 | ((cp - 0x10000) >> 10));
+ result.emplace_back(0xdc00 | ((cp - 0x10000) & 0x03ff));
+ }
+ else {
+ throw std::invalid_argument("invalid codepoint");
+ }
+ return result;
+}
+
+static std::vector<uint16_t> codepoints_to_utf16(const std::vector<uint32_t> & cps) {
+ std::vector<uint16_t> result;
+ for (size_t i = 0; i < cps.size(); ++i) {
+ auto temp = codepoint_to_utf16(cps[i]);
+ result.insert(result.end(), temp.begin(), temp.end());
+ }
+ return result;
+}
+
+static uint32_t codepoint_from_utf16(const std::vector<uint16_t> & utf16, size_t & offset) {
+ assert(offset < utf16.size());
+ if (((utf16[offset + 0] >> 10) << 10) != 0xd800) {
+ auto result = utf16[offset + 0];
+ offset += 1;
+ return result;
+ }
+ else {
+ if (offset + 1 >= utf16.size() || !((utf16[offset + 1] & 0xdc00) == 0xdc00))
+ throw std::invalid_argument("invalid character");
+ auto result = 0x10000 + (((utf16[offset + 0] & 0x03ff) << 10) | (utf16[offset + 1] & 0x03ff));
+ offset += 2;
+ return result;
+ }
+ throw std::invalid_argument("invalid string");
+}
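// Worked example (editorial): codepoints above 0xffff become a surrogate pair.
// For U+1F600, cp - 0x10000 = 0xf600, so the encoder above emits
//   0xd800 | (0xf600 >> 10)    = 0xd83d   (high surrogate)
//   0xdc00 | (0xf600 & 0x03ff) = 0xde00   (low surrogate)
// and the decoder reassembles 0x10000 + ((0x003d << 10) | 0x0200) = 0x1f600.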
+
+static std::vector<uint32_t> codepoints_from_utf16(const std::vector<uint16_t> & utf16) {
+ std::vector<uint32_t> result;
+ size_t offset = 0;
+ while (offset < utf16.size())
+ result.push_back(codepoint_from_utf16(utf16, offset));
+ return result;
+}
+
+#define CODEPOINT_TYPE_UNIDENTIFIED 0
+#define CODEPOINT_TYPE_DIGIT 1
+#define CODEPOINT_TYPE_LETTER 2
+#define CODEPOINT_TYPE_WHITESPACE 3
+#define CODEPOINT_TYPE_ACCENT_MARK 4
+#define CODEPOINT_TYPE_PUNCTUATION 5
+#define CODEPOINT_TYPE_SYMBOL 6
+#define CODEPOINT_TYPE_CONTROL 7
+
+static std::unordered_map<uint32_t, int> codepoint_type_map() {
+ std::unordered_map<uint32_t, int> codepoint_types;
+ for (auto p : digit_ranges) {
+ for (auto i = p.first; i <= p.second; ++i)
+ codepoint_types[i] = CODEPOINT_TYPE_DIGIT;
+ }
+ for (auto p : letter_ranges) {
+ for (auto i = p.first; i <= p.second; ++i)
+ codepoint_types[i] = CODEPOINT_TYPE_LETTER;
+ }
+ for (auto p : whitespace_ranges) {
+ for (auto i = p.first; i <= p.second; ++i)
+ codepoint_types[i] = CODEPOINT_TYPE_WHITESPACE;
+ }
+ for (auto p : accent_mark_ranges) {
+ for (auto i = p.first; i <= p.second; ++i)
+ codepoint_types[i] = CODEPOINT_TYPE_ACCENT_MARK;
+ }
+ for (auto p : punctuation_ranges) {
+ for (auto i = p.first; i <= p.second; ++i)
+ codepoint_types[i] = CODEPOINT_TYPE_PUNCTUATION;
+ }
+ for (auto p : symbol_ranges) {
+ for (auto i = p.first; i <= p.second; ++i)
+ codepoint_types[i] = CODEPOINT_TYPE_SYMBOL;
+ }
+ for (auto p : control_ranges) {
+ for (auto i = p.first; i <= p.second; ++i)
+ codepoint_types[i] = CODEPOINT_TYPE_CONTROL;
+ }
+ return codepoint_types;
+}
+
+static int codepoint_type(uint32_t cp) {
+ static std::unordered_map<uint32_t, int> codepoint_types = codepoint_type_map();
+ return codepoint_types[cp];
+}
+
+static int codepoint_type(const std::string & utf8) {
+ if (utf8.length() == 0)
+ return CODEPOINT_TYPE_UNIDENTIFIED;
+ size_t offset = 0;
+ return codepoint_type(codepoint_from_utf8(utf8, offset));
+}
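// Usage note (editorial): these classifications feed the BPE pre-tokenizer's
// split rules. Codepoints not covered by any range table fall back to
// CODEPOINT_TYPE_UNIDENTIFIED, since the map's operator[] default-constructs 0:
//   codepoint_type(0x41) == CODEPOINT_TYPE_LETTER     // 'A'
//   codepoint_type(0x37) == CODEPOINT_TYPE_DIGIT      // '7'
//   codepoint_type(" ")  == CODEPOINT_TYPE_WHITESPACE // via the utf8 overload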
+
+static std::unordered_map<uint8_t, std::string> bytes_to_unicode_map_bpe() {
+ std::unordered_map<uint8_t, std::string> map;
+ for (int ch = u'!'; ch <= u'~'; ++ch) {
+ assert(0 <= ch && ch < 256);
+ map[ch] = codepoint_to_utf8(ch);
+ }
+ for (int ch = u'¡'; ch <= u'¬'; ++ch) {
+ assert(0 <= ch && ch < 256);
+ map[ch] = codepoint_to_utf8(ch);
+ }
+ for (int ch = u'®'; ch <= u'ÿ'; ++ch) {
+ assert(0 <= ch && ch < 256);
+ map[ch] = codepoint_to_utf8(ch);
+ }
+ auto n = 0;
+ for (int ch = 0; ch < 256; ++ch) {
+ if (map.find(ch) == map.end()) {
+ map[ch] = codepoint_to_utf8(256 + n);
+ ++n;
+ }
+ }
+ return map;
+}
+
+static std::string bytes_to_unicode_bpe(uint8_t byte) {
+ static std::unordered_map<uint8_t, std::string> map = bytes_to_unicode_map_bpe();
+ return map.at(byte);
+}
+
+static std::unordered_map<std::string, uint8_t> unicode_to_bytes_map_bpe() {
+ std::unordered_map<std::string, uint8_t> map;
+ for (int ch = u'!'; ch <= u'~'; ++ch) {
+ assert(0 <= ch && ch < 256);
+ map[codepoint_to_utf8(ch)] = ch;
+ }
+ for (int ch = u'¡'; ch <= u'¬'; ++ch) {
+ assert(0 <= ch && ch < 256);
+ map[codepoint_to_utf8(ch)] = ch;
+ }
+ for (int ch = u'®'; ch <= u'ÿ'; ++ch) {
+ assert(0 <= ch && ch < 256);
+ map[codepoint_to_utf8(ch)] = ch;
+ }
+ auto n = 0;
+ for (int ch = 0; ch < 256; ++ch) {
+ if (map.find(codepoint_to_utf8(ch)) == map.end()) {
+ map[codepoint_to_utf8(256 + n)] = ch;
+ ++n;
+ }
+ }
+ return map;
+}
+
+static uint8_t unicode_to_bytes_bpe(const std::string & utf8) {
+ static std::unordered_map<std::string, uint8_t> map = unicode_to_bytes_map_bpe();
+ return map.at(utf8);
+}
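// Editorial note: this is the reversible GPT-2 style byte<->unicode mapping.
// Printable bytes map to themselves; the remaining bytes (controls, space,
// 0x7f-0xa0, 0xad) are shifted to 0x100 + n so that every byte has a visible,
// unambiguous stand-in. For example, 0x20 (space) is the 33rd unmapped byte,
// so bytes_to_unicode_bpe(0x20) yields the UTF-8 encoding of U+0120 ("Ġ"),
// and unicode_to_bytes_bpe("Ġ") == 0x20.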
+
SOURCE_FILES
${WHISPER_LIB_DIR}/ggml.c
${WHISPER_LIB_DIR}/ggml-alloc.c
+ ${WHISPER_LIB_DIR}/ggml-backend.c
+ ${WHISPER_LIB_DIR}/ggml-quants.c
${WHISPER_LIB_DIR}/whisper.cpp
${CMAKE_SOURCE_DIR}/jni.c
)
18627C9429052C4900BD2A04 /* whisper.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 18627C9329052C4900BD2A04 /* whisper.cpp */; settings = {COMPILER_FLAGS = "-DWHISPER_USE_COREML"; }; };
18627C9629052C5800BD2A04 /* ggml.c in Sources */ = {isa = PBXBuildFile; fileRef = 18627C9529052C5800BD2A04 /* ggml.c */; settings = {COMPILER_FLAGS = "-DGGML_USE_ACCELERATE"; }; };
18627C9B29052CFF00BD2A04 /* ggml-base.en.bin in Resources */ = {isa = PBXBuildFile; fileRef = 18627C9A29052CFF00BD2A04 /* ggml-base.en.bin */; };
+ 18ABE15A2AF556340044A204 /* ggml-backend.c in Sources */ = {isa = PBXBuildFile; fileRef = 18ABE1572AF556340044A204 /* ggml-backend.c */; };
+ 18ABE15B2AF556340044A204 /* ggml-quants.c in Sources */ = {isa = PBXBuildFile; fileRef = 18ABE1592AF556340044A204 /* ggml-quants.c */; };
7FE3424B2A0C3FA20015A058 /* whisper-encoder-impl.m in Sources */ = {isa = PBXBuildFile; fileRef = 7FE342452A0C3FA20015A058 /* whisper-encoder-impl.m */; };
7FE3424C2A0C3FA20015A058 /* whisper-encoder.mm in Sources */ = {isa = PBXBuildFile; fileRef = 7FE342472A0C3FA20015A058 /* whisper-encoder.mm */; };
7FE3424D2A0C3FA20015A058 /* whisper-decoder-impl.m in Sources */ = {isa = PBXBuildFile; fileRef = 7FE3424A2A0C3FA20015A058 /* whisper-decoder-impl.m */; };
18627C9529052C5800BD2A04 /* ggml.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = ggml.c; path = ../../../ggml.c; sourceTree = "<group>"; };
18627C9729052C6600BD2A04 /* ggml.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = ggml.h; path = ../../../ggml.h; sourceTree = "<group>"; };
18627C9A29052CFF00BD2A04 /* ggml-base.en.bin */ = {isa = PBXFileReference; lastKnownFileType = archive.macbinary; name = "ggml-base.en.bin"; path = "../../../models/ggml-base.en.bin"; sourceTree = "<group>"; };
+ 18ABE1542AF556340044A204 /* ggml-quants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-quants.h"; path = "../../../ggml-quants.h"; sourceTree = "<group>"; };
+ 18ABE1552AF556340044A204 /* ggml-backend.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-backend.h"; path = "../../../ggml-backend.h"; sourceTree = "<group>"; };
+ 18ABE1562AF556340044A204 /* ggml-backend-impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-backend-impl.h"; path = "../../../ggml-backend-impl.h"; sourceTree = "<group>"; };
+ 18ABE1572AF556340044A204 /* ggml-backend.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-backend.c"; path = "../../../ggml-backend.c"; sourceTree = "<group>"; };
+ 18ABE1582AF556340044A204 /* ggml-impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "ggml-impl.h"; path = "../../../ggml-impl.h"; sourceTree = "<group>"; };
+ 18ABE1592AF556340044A204 /* ggml-quants.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = "ggml-quants.c"; path = "../../../ggml-quants.c"; sourceTree = "<group>"; };
7FE342452A0C3FA20015A058 /* whisper-encoder-impl.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "whisper-encoder-impl.m"; sourceTree = "<group>"; };
7FE342462A0C3FA20015A058 /* whisper-encoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "whisper-encoder.h"; sourceTree = "<group>"; };
7FE342472A0C3FA20015A058 /* whisper-encoder.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; path = "whisper-encoder.mm"; sourceTree = "<group>"; };
18627C7829052BDF00BD2A04 /* whisper.objc */ = {
isa = PBXGroup;
children = (
+ 18ABE1562AF556340044A204 /* ggml-backend-impl.h */,
+ 18ABE1572AF556340044A204 /* ggml-backend.c */,
+ 18ABE1552AF556340044A204 /* ggml-backend.h */,
+ 18ABE1582AF556340044A204 /* ggml-impl.h */,
+ 18ABE1592AF556340044A204 /* ggml-quants.c */,
+ 18ABE1542AF556340044A204 /* ggml-quants.h */,
1844471D2AB2195F007D6BFE /* ggml-metal.metal */,
1844471B2AB21655007D6BFE /* ggml-metal.m */,
184447182AB211A2007D6BFE /* ggml-alloc.c */,
buildActionMask = 2147483647;
files = (
18627C8129052BDF00BD2A04 /* ViewController.m in Sources */,
+ 18ABE15B2AF556340044A204 /* ggml-quants.c in Sources */,
7FE3424C2A0C3FA20015A058 /* whisper-encoder.mm in Sources */,
18627C9429052C4900BD2A04 /* whisper.cpp in Sources */,
18627C9629052C5800BD2A04 /* ggml.c in Sources */,
18627C7B29052BDF00BD2A04 /* AppDelegate.m in Sources */,
7FE3424D2A0C3FA20015A058 /* whisper-decoder-impl.m in Sources */,
1844471A2AB211A2007D6BFE /* ggml-alloc.c in Sources */,
+ 18ABE15A2AF556340044A204 /* ggml-backend.c in Sources */,
18627C8C29052BE000BD2A04 /* main.m in Sources */,
18627C7E29052BDF00BD2A04 /* SceneDelegate.m in Sources */,
1844471C2AB21655007D6BFE /* ggml-metal.m in Sources */,
0AAC5DCC29539EB1003032C3 /* ggml.c in Sources */ = {isa = PBXBuildFile; fileRef = 0AAC5DC929539EB0003032C3 /* ggml.c */; settings = {COMPILER_FLAGS = "-DGGML_USE_ACCELERATE -Wno-shorten-64-to-32"; }; };
0AAC5DCE2953A05C003032C3 /* WhisperState.swift in Sources */ = {isa = PBXBuildFile; fileRef = 0AAC5DCD2953A05C003032C3 /* WhisperState.swift */; };
0AAC5DD12953A394003032C3 /* LibWhisper.swift in Sources */ = {isa = PBXBuildFile; fileRef = 0AAC5DD02953A394003032C3 /* LibWhisper.swift */; };
+ 18ABE1522AF555FA0044A204 /* ggml-backend.c in Sources */ = {isa = PBXBuildFile; fileRef = 18ABE14C2AF555FA0044A204 /* ggml-backend.c */; };
+ 18ABE1532AF555FA0044A204 /* ggml-quants.c in Sources */ = {isa = PBXBuildFile; fileRef = 18ABE1512AF555FA0044A204 /* ggml-quants.c */; };
18AED4812AB21F2B009D854F /* ggml-alloc.c in Sources */ = {isa = PBXBuildFile; fileRef = 18AED47F2AB21F2B009D854F /* ggml-alloc.c */; };
/* End PBXBuildFile section */
0AAC5DCA29539EB0003032C3 /* ggml.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ggml.h; sourceTree = "<group>"; };
0AAC5DCD2953A05C003032C3 /* WhisperState.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WhisperState.swift; sourceTree = "<group>"; };
0AAC5DD02953A394003032C3 /* LibWhisper.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LibWhisper.swift; sourceTree = "<group>"; };
+ 18ABE14C2AF555FA0044A204 /* ggml-backend.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "ggml-backend.c"; sourceTree = "<group>"; };
+ 18ABE14D2AF555FA0044A204 /* ggml-backend.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "ggml-backend.h"; sourceTree = "<group>"; };
+ 18ABE14E2AF555FA0044A204 /* ggml-backend-impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "ggml-backend-impl.h"; sourceTree = "<group>"; };
+ 18ABE14F2AF555FA0044A204 /* ggml-quants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "ggml-quants.h"; sourceTree = "<group>"; };
+ 18ABE1502AF555FA0044A204 /* ggml-impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "ggml-impl.h"; sourceTree = "<group>"; };
+ 18ABE1512AF555FA0044A204 /* ggml-quants.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "ggml-quants.c"; sourceTree = "<group>"; };
18AED47F2AB21F2B009D854F /* ggml-alloc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = "ggml-alloc.c"; sourceTree = "<group>"; };
18AED4802AB21F2B009D854F /* ggml-alloc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "ggml-alloc.h"; sourceTree = "<group>"; };
/* End PBXFileReference section */
0AAC5DC529539E89003032C3 /* whisper.cpp */ = {
isa = PBXGroup;
children = (
+ 18ABE14E2AF555FA0044A204 /* ggml-backend-impl.h */,
+ 18ABE14C2AF555FA0044A204 /* ggml-backend.c */,
+ 18ABE14D2AF555FA0044A204 /* ggml-backend.h */,
+ 18ABE1502AF555FA0044A204 /* ggml-impl.h */,
+ 18ABE1512AF555FA0044A204 /* ggml-quants.c */,
+ 18ABE14F2AF555FA0044A204 /* ggml-quants.h */,
18AED47F2AB21F2B009D854F /* ggml-alloc.c */,
18AED4802AB21F2B009D854F /* ggml-alloc.h */,
0AAC5DC929539EB0003032C3 /* ggml.c */,
0AAC5D9D29539CCF003032C3 /* ContentView.swift in Sources */,
0AAC5D9B29539CCF003032C3 /* WhisperCppDemoApp.swift in Sources */,
0AAC5DCC29539EB1003032C3 /* ggml.c in Sources */,
+ 18ABE1532AF555FA0044A204 /* ggml-quants.c in Sources */,
0AAC5DCE2953A05C003032C3 /* WhisperState.swift in Sources */,
0AAC5DD12953A394003032C3 /* LibWhisper.swift in Sources */,
0AA7514C2953B569001EE061 /* RiffWaveUtils.swift in Sources */,
0AAC5DCB29539EB1003032C3 /* whisper.cpp in Sources */,
0AA7514E2953D958001EE061 /* Recorder.swift in Sources */,
18AED4812AB21F2B009D854F /* ggml-alloc.c in Sources */,
+ 18ABE1522AF555FA0044A204 /* ggml-backend.c in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
#!/bin/bash
-cp -rpv ../ggml/src/ggml.c ./ggml.c
-cp -rpv ../ggml/src/ggml-alloc.c ./ggml-alloc.c
-cp -rpv ../ggml/src/ggml-cuda.h ./ggml-cuda.h
-cp -rpv ../ggml/src/ggml-cuda.cu ./ggml-cuda.cu
-cp -rpv ../ggml/src/ggml-opencl.h ./ggml-opencl.h
-cp -rpv ../ggml/src/ggml-opencl.cpp ./ggml-opencl.cpp
-cp -rpv ../ggml/src/ggml-metal.h ./ggml-metal.h
-cp -rpv ../ggml/src/ggml-metal.m ./ggml-metal.m
-cp -rpv ../ggml/src/ggml-metal.metal ./ggml-metal.metal
-cp -rpv ../ggml/include/ggml/ggml.h ./ggml.h
-cp -rpv ../ggml/include/ggml/ggml-alloc.h ./ggml-alloc.h
-cp -rpv ../ggml/examples/common.h ./examples/common.h
-cp -rpv ../ggml/examples/common.cpp ./examples/common.cpp
-cp -rpv ../ggml/examples/common-ggml.h ./examples/common-ggml.h
-cp -rpv ../ggml/examples/common-ggml.cpp ./examples/common-ggml.cpp
+cp -rpv ../ggml/src/ggml.c ./ggml.c
+cp -rpv ../ggml/src/ggml-alloc.c ./ggml-alloc.c
+cp -rpv ../ggml/src/ggml-backend-impl.h ./ggml-backend-impl.h
+cp -rpv ../ggml/src/ggml-backend.c ./ggml-backend.c
+cp -rpv ../ggml/src/ggml-cuda.cu ./ggml-cuda.cu
+cp -rpv ../ggml/src/ggml-cuda.h ./ggml-cuda.h
+cp -rpv ../ggml/src/ggml-impl.h ./ggml-impl.h
+cp -rpv ../ggml/src/ggml-metal.h ./ggml-metal.h
+cp -rpv ../ggml/src/ggml-metal.m ./ggml-metal.m
+cp -rpv ../ggml/src/ggml-metal.metal ./ggml-metal.metal
+#cp -rpv ../ggml/src/ggml-mpi.h ./ggml-mpi.h
+#cp -rpv ../ggml/src/ggml-mpi.c ./ggml-mpi.c
+cp -rpv ../ggml/src/ggml-opencl.cpp ./ggml-opencl.cpp
+cp -rpv ../ggml/src/ggml-opencl.h ./ggml-opencl.h
+cp -rpv ../ggml/src/ggml-quants.c ./ggml-quants.c
+cp -rpv ../ggml/src/ggml-quants.h ./ggml-quants.h
+
+cp -rpv ../ggml/include/ggml/ggml.h ./ggml.h
+cp -rpv ../ggml/include/ggml/ggml-alloc.h ./ggml-alloc.h
+cp -rpv ../ggml/include/ggml/ggml-backend.h ./ggml-backend.h
+
+cp -rpv ../ggml/examples/common.h ./examples/common.h
+cp -rpv ../ggml/examples/common.cpp ./examples/common.cpp
+cp -rpv ../ggml/examples/common-ggml.h ./examples/common-ggml.h
+cp -rpv ../ggml/examples/common-ggml.cpp ./examples/common-ggml.cpp
cp -rpv ../ggml/examples/whisper/whisper.h ./whisper.h
cp -rpv ../ggml/examples/whisper/whisper.cpp ./whisper.cpp
#include "ggml-alloc.h"
+#include "ggml-backend-impl.h"
#include "ggml.h"
+#include "ggml-impl.h"
#include <assert.h>
+#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#ifdef __has_include
- #if __has_include(<unistd.h>)
- #include <unistd.h>
- #if defined(_POSIX_MAPPED_FILES)
- #include <sys/types.h>
- #include <sys/mman.h>
- #endif
- #endif
-#endif
-
-#if defined(_WIN32)
- #define WIN32_LEAN_AND_MEAN
- #ifndef NOMINMAX
- #define NOMINMAX
- #endif
- #include <windows.h>
- #include <memoryapi.h>
-#endif
-
-
-#define UNUSED(x) (void)(x)
#define MAX(a, b) ((a) > (b) ? (a) : (b))
-#define GGML_MAX_CONCUR (2*GGML_MAX_NODES)
+#define MAX_FREE_BLOCKS 256
//#define GGML_ALLOCATOR_DEBUG
-//#define AT_PRINTF printf
-#define AT_PRINTF(...) ((void)0)
-
-struct hash_node {
- struct ggml_tensor * t;
- int n_children;
- int n_views;
-};
-
-static size_t hash(void * p) {
- return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE;
-}
-
-static struct hash_node * hash_get(struct hash_node hash_table[], struct ggml_tensor * t) {
- size_t h = hash(t);
-
- // linear probing
- size_t i = h;
- while (hash_table[i].t != NULL) {
- if (hash_table[i].t == t) {
- return &hash_table[i];
- }
- i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE;
- if (i == h) {
- // hash table is full
- GGML_ASSERT(false);
- }
- }
-
- hash_table[i].t = t;
- return &hash_table[i];
-}
+//#define AT_PRINTF(...) fprintf(stderr, __VA_ARGS__)
+#define AT_PRINTF(...)
// TODO: GGML_PAD ?
static size_t aligned_offset(const void * buffer, size_t offset, size_t alignment) {
size_t size;
};
-#define MAX_FREE_BLOCKS 128
-
-struct ggml_allocr {
- void * data;
- size_t size;
+struct ggml_tallocr {
+ struct ggml_backend_buffer * buffer;
+ bool buffer_owned;
+ void * base;
size_t alignment;
+
int n_free_blocks;
struct free_block free_blocks[MAX_FREE_BLOCKS];
- struct hash_node hash_table[GGML_GRAPH_HASHTABLE_SIZE];
+
size_t max_size;
+
bool measure;
- int parse_seq[GGML_MAX_CONCUR];
- int parse_seq_len;
#ifdef GGML_ALLOCATOR_DEBUG
struct ggml_tensor * allocated_tensors[1024];
};
#ifdef GGML_ALLOCATOR_DEBUG
-static void add_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
+static void add_allocated_tensor(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
for (int i = 0; i < 1024; i++) {
if (alloc->allocated_tensors[i] == NULL) {
alloc->allocated_tensors[i] = tensor;
}
GGML_ASSERT(!"out of allocated_tensors");
}
-static void remove_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
+static void remove_allocated_tensor(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
for (int i = 0; i < 1024; i++) {
if (alloc->allocated_tensors[i] == tensor ||
(alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
}
#endif
-static size_t ggml_allocr_get_alloc_size(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
- return ggml_nbytes(tensor);
-
- UNUSED(alloc);
-}
-
// check if a tensor is allocated by this buffer
-static bool ggml_allocr_is_own(struct ggml_allocr * alloc, const struct ggml_tensor * tensor) {
- void * ptr = tensor->data;
- return ptr >= alloc->data && (char *)ptr < (char *)alloc->data + alloc->max_size;
+static bool ggml_tallocr_is_own(ggml_tallocr_t alloc, const struct ggml_tensor * tensor) {
+ return tensor->buffer == alloc->buffer;
}
static bool ggml_is_view(struct ggml_tensor * t) {
return t->view_src != NULL;
}
-void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
-#ifdef GGML_ALLOCATOR_DEBUG
+void ggml_tallocr_alloc(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
GGML_ASSERT(!ggml_is_view(tensor)); // views generally get data pointer from one of their sources
GGML_ASSERT(tensor->data == NULL); // avoid allocating tensor which already has memory allocated
-#endif
- size_t size = ggml_allocr_get_alloc_size(alloc, tensor);
+
+ size_t size = ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor);
size = aligned_offset(NULL, size, alloc->alignment);
AT_PRINTF("%s: allocating %s (%zu bytes) - ", __func__, tensor->name, size);
}
tensor->data = addr;
+ tensor->buffer = alloc->buffer;
+ if (!alloc->measure) {
+ ggml_backend_buffer_init_tensor(alloc->buffer, tensor);
+ }
#ifdef GGML_ALLOCATOR_DEBUG
add_allocated_tensor(alloc, tensor);
}
#endif
- alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->data + size);
+ alloc->max_size = MAX(alloc->max_size, (char*)addr - (char*)alloc->base + size);
}
// this is a very naive implementation, but for our case the number of free blocks should be very small
-static void ggml_allocr_free_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
- void * ptr = tensor->data;
-
- if (ggml_allocr_is_own(alloc, tensor) == false) {
+static void ggml_tallocr_free_tensor(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
+ if (ggml_tallocr_is_own(alloc, tensor) == false) {
// the tensor was not allocated in this buffer
// this can happen because the graph allocator will try to free weights and other tensors from different buffers
// the easiest way to deal with this is just to ignore it
+ // AT_PRINTF("ignoring %s (their buffer: %p, our buffer: %p)\n", tensor->name, (void *)tensor->buffer, (void *)alloc->buffer);
return;
}
- size_t size = ggml_allocr_get_alloc_size(alloc, tensor);
+ void * ptr = tensor->data;
+
+ size_t size = ggml_backend_buffer_get_alloc_size(alloc->buffer, tensor);
size = aligned_offset(NULL, size, alloc->alignment);
- AT_PRINTF("%s: freeing %s (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, size, alloc->n_free_blocks);
+ AT_PRINTF("%s: freeing %s at %p (%zu bytes) - n_free_blocks = %d\n", __func__, tensor->name, ptr, size, alloc->n_free_blocks);
+
+ if (!alloc->measure) {
+ ggml_backend_buffer_free_tensor(alloc->buffer, tensor);
+ }
#ifdef GGML_ALLOCATOR_DEBUG
remove_allocated_tensor(alloc, tensor);
alloc->n_free_blocks++;
}
-void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, const int * list, int n) {
- for (int i = 0; i < n; i++) {
- alloc->parse_seq[i] = list[i];
+void ggml_tallocr_reset(ggml_tallocr_t alloc) {
+ alloc->n_free_blocks = 1;
+ size_t align_offset = aligned_offset(alloc->base, 0, alloc->alignment);
+ alloc->free_blocks[0].addr = (char *)alloc->base + align_offset;
+
+ if (alloc->measure) {
+ alloc->free_blocks[0].size = SIZE_MAX/2; // restrict maximum size of a measure allocator to half size_t max to avoid overflows
+ } else {
+ alloc->free_blocks[0].size = ggml_backend_buffer_get_size(alloc->buffer) - align_offset;
}
- alloc->parse_seq_len = n;
}
-void ggml_allocr_reset(struct ggml_allocr * alloc) {
- alloc->n_free_blocks = 1;
- size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment);
- alloc->free_blocks[0].addr = (char *)alloc->data + align_offset;
- alloc->free_blocks[0].size = alloc->size - align_offset;
-}
+ggml_tallocr_t ggml_tallocr_new(void * data, size_t size, size_t alignment) {
+ struct ggml_backend_buffer * buffer = ggml_backend_cpu_buffer_from_ptr(NULL, data, size);
-struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment) {
- struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
+ ggml_tallocr_t alloc = (ggml_tallocr_t)malloc(sizeof(struct ggml_tallocr));
- *alloc = (struct ggml_allocr){
- /*.data = */ data,
- /*.size = */ size,
+ *alloc = (struct ggml_tallocr) {
+ /*.buffer = */ buffer,
+ /*.buffer_owned = */ true,
+ /*.base = */ ggml_backend_buffer_get_base(buffer),
/*.alignment = */ alignment,
/*.n_free_blocks = */ 0,
/*.free_blocks = */ {{0}},
- /*.hash_table = */ {{0}},
/*.max_size = */ 0,
/*.measure = */ false,
- /*.parse_seq = */ {0},
- /*.parse_seq_len = */ 0,
#ifdef GGML_ALLOCATOR_DEBUG
/*.allocated_tensors = */ {0},
#endif
};
- ggml_allocr_reset(alloc);
+ ggml_tallocr_reset(alloc);
return alloc;
}
-// OS specific functions to allocate and free uncommitted virtual memory
-static void * alloc_vmem(size_t size) {
-#if defined(_WIN32)
- return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
-#elif defined(_POSIX_MAPPED_FILES)
- void * ptr = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
- if (ptr == MAP_FAILED) {
- return NULL;
- }
- return ptr;
-#else
- // use a fixed address for other platforms
- uintptr_t base_addr = (uintptr_t)-size - 0x100;
- return (void *)base_addr;
-#endif
-}
+ggml_tallocr_t ggml_tallocr_new_measure(size_t alignment) {
+ ggml_tallocr_t alloc = ggml_tallocr_new((void *)0x1000, SIZE_MAX/2, alignment);
+ alloc->measure = true;
-static void free_vmem(void * base_addr, size_t size) {
-#if defined(_WIN32)
- VirtualFree(base_addr, 0, MEM_RELEASE);
- UNUSED(size);
-#elif defined(_POSIX_MAPPED_FILES)
- munmap(base_addr, size);
-#else
- // nothing to do
- UNUSED(base_addr);
- UNUSED(size);
-#endif
+ return alloc;
}
-// allocate uncommitted virtual memory to measure the size of the graph
-static void alloc_measure_vmem(void ** base_addr, size_t * size) {
- // 128GB for 64-bit, 1GB for 32-bit
- *size = sizeof(void *) == 4 ? 1ULL<<30 : 1ULL<<37;
- do {
- *base_addr = alloc_vmem(*size);
- if (*base_addr != NULL) {
- AT_PRINTF("allocated %.2f GB of virtual memory for measure buffer at %p\n", *size / 1024.0 / 1024.0 / 1024.0, *base_addr);
- return;
- }
- // try again with half the size
- *size /= 2;
- } while (*size > 0);
+ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend) {
+ // create a backend buffer to get the correct tensor allocation sizes
+ ggml_backend_buffer_t buffer = ggml_backend_alloc_buffer(backend, 1);
- GGML_ASSERT(!"failed to allocate virtual memory for measure buffer");
+ // TODO: move alloc initialization to a common ggml_tallocr_new_impl function
+ ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(buffer);
+ alloc->buffer_owned = true;
+ alloc->measure = true;
+ ggml_tallocr_reset(alloc);
+ return alloc;
}
-static void free_measure_vmem(void * base_addr, size_t size) {
- free_vmem(base_addr, size);
+ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size) {
+ ggml_backend_buffer_t buffer = ggml_backend_alloc_buffer(backend, size);
+ ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(buffer);
+ alloc->buffer_owned = true;
+ return alloc;
}
-struct ggml_allocr * ggml_allocr_new_measure(size_t alignment) {
- struct ggml_allocr * alloc = (struct ggml_allocr *)malloc(sizeof(struct ggml_allocr) /* + n_free_blocks * sizeof(struct free_block) */);
+ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer) {
+ ggml_tallocr_t alloc = (ggml_tallocr_t)malloc(sizeof(struct ggml_tallocr));
- void * base_addr;
- size_t size;
-
- alloc_measure_vmem(&base_addr, &size);
-
- *alloc = (struct ggml_allocr){
- /*.data = */ base_addr,
- /*.size = */ size,
- /*.alignment = */ alignment,
+ *alloc = (struct ggml_tallocr) {
+ /*.buffer = */ buffer,
+ /*.buffer_owned = */ false,
+ /*.base = */ ggml_backend_buffer_get_base(buffer),
+ /*.alignment = */ ggml_backend_buffer_get_alignment(buffer),
/*.n_free_blocks = */ 0,
/*.free_blocks = */ {{0}},
- /*.hash_table = */ {{0}},
/*.max_size = */ 0,
- /*.measure = */ true,
- /*.parse_seq = */ {0},
- /*.parse_seq_len = */ 0,
+ /*.measure = */ false,
#ifdef GGML_ALLOCATOR_DEBUG
/*.allocated_tensors = */ {0},
#endif
};
- ggml_allocr_reset(alloc);
+ ggml_tallocr_reset(alloc);
return alloc;
}
-void ggml_allocr_free(struct ggml_allocr * alloc) {
- if (alloc->measure) {
- free_measure_vmem(alloc->data, alloc->size);
+struct ggml_backend_buffer * ggml_tallocr_get_buffer(ggml_tallocr_t alloc) {
+ return alloc->buffer;
+}
+
+void ggml_tallocr_free(ggml_tallocr_t alloc) {
+ if (alloc == NULL) {
+ return;
+ }
+
+ if (alloc->buffer_owned) {
+ ggml_backend_buffer_free(alloc->buffer);
}
free(alloc);
}
-bool ggml_allocr_is_measure(struct ggml_allocr * alloc) {
+bool ggml_tallocr_is_measure(ggml_tallocr_t alloc) {
return alloc->measure;
}
-//////////// compute graph allocator
+size_t ggml_tallocr_max_size(ggml_tallocr_t alloc) {
+ return alloc->max_size;
+}
+
+// graph allocator
+
+struct hash_node {
+ int n_children;
+ int n_views;
+};
+
+struct ggml_gallocr {
+ ggml_tallocr_t talloc;
+ struct ggml_hash_set hash_set;
+ struct hash_node * hash_values;
+ size_t hash_values_size;
+ ggml_tallocr_t * hash_allocs;
+ int * parse_seq;
+ int parse_seq_len;
+};
+
+ggml_gallocr_t ggml_gallocr_new(void) {
+ ggml_gallocr_t galloc = (ggml_gallocr_t)malloc(sizeof(struct ggml_gallocr));
+
+ *galloc = (struct ggml_gallocr) {
+ /*.talloc = */ NULL,
+ /*.hash_set = */ {0},
+ /*.hash_values = */ NULL,
+ /*.hash_values_size = */ 0,
+ /*.hash_allocs = */ NULL,
+ /*.parse_seq = */ NULL,
+ /*.parse_seq_len = */ 0,
+ };
+
+ return galloc;
+}
+
+void ggml_gallocr_free(ggml_gallocr_t galloc) {
+ if (galloc == NULL) {
+ return;
+ }
+
+ if (galloc->hash_set.keys != NULL) {
+ free(galloc->hash_set.keys);
+ }
+ if (galloc->hash_values != NULL) {
+ free(galloc->hash_values);
+ }
+ if (galloc->hash_allocs != NULL) {
+ free(galloc->hash_allocs);
+ }
+ if (galloc->parse_seq != NULL) {
+ free(galloc->parse_seq);
+ }
+ free(galloc);
+}
+
+void ggml_gallocr_set_parse_seq(ggml_gallocr_t galloc, const int * list, int n) {
+ free(galloc->parse_seq);
+ galloc->parse_seq = malloc(sizeof(int) * n);
+
+ for (int i = 0; i < n; i++) {
+ galloc->parse_seq[i] = list[i];
+ }
+ galloc->parse_seq_len = n;
+}
+
+static struct hash_node * hash_get(ggml_gallocr_t galloc, struct ggml_tensor * t) {
+ size_t i = ggml_hash_find_or_insert(galloc->hash_set, t);
+ return &galloc->hash_values[i];
+}
static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
if (a->type != b->type) {
case GGML_OP_ROPE:
case GGML_OP_RMS_NORM:
case GGML_OP_SOFT_MAX:
- case GGML_OP_CONT:
return true;
default:
}
}
-static void allocate_node(struct ggml_allocr * alloc, struct ggml_tensor * node) {
- struct hash_node * ht = alloc->hash_table;
+static ggml_tallocr_t node_tallocr(ggml_gallocr_t galloc, struct ggml_tensor * node) {
+ if (galloc->talloc != NULL) {
+ return galloc->talloc;
+ }
+
+ return galloc->hash_allocs[ggml_hash_find_or_insert(galloc->hash_set, node)];
+}
+
+static void init_view(ggml_gallocr_t galloc, struct ggml_tensor * view) {
+ ggml_tallocr_t alloc = node_tallocr(galloc, view);
+
+ //printf("init_view: %s from src %s\n", view->name, view->view_src->name);
+ GGML_ASSERT(view->view_src != NULL && view->view_src->data != NULL);
+ view->backend = view->view_src->backend;
+ view->buffer = view->view_src->buffer;
+ view->data = (char *)view->view_src->data + view->view_offs;
+
+ // FIXME: the view should be initialized by the owning buffer, but currently this breaks the CUDA backend
+ // due to the ggml_tensor_extra_gpu ring buffer overwriting the KV cache extras
+ assert(ggml_tallocr_is_measure(alloc) || !view->buffer || view->buffer->backend == alloc->buffer->backend);
+
+ if (!alloc->measure) {
+ ggml_backend_buffer_init_tensor(alloc->buffer, view);
+ }
+}
+
+static void allocate_node(ggml_gallocr_t galloc, struct ggml_tensor * node) {
+ ggml_tallocr_t alloc = node_tallocr(galloc, node);
+
if (node->data == NULL) {
if (ggml_is_view(node)) {
- assert(node->view_src->data != NULL);
- node->data = (char *)node->view_src->data + node->view_offs;
+ init_view(galloc, node);
} else {
// see if we can reuse a parent's buffer (inplace)
if (ggml_op_can_inplace(node->op)) {
}
// if the node's data is external, then we cannot re-use it
- if (ggml_allocr_is_own(alloc, parent) == false) {
+ if (ggml_tallocr_is_own(alloc, parent) == false) {
AT_PRINTF("not reusing parent %s for %s as %p is external\n", parent->name, node->name, parent->data);
continue;
}
- struct hash_node * p_hn = hash_get(ht, parent);
+ struct hash_node * p_hn = hash_get(galloc, parent);
if (parent->data != NULL && p_hn->n_children == 1 && p_hn->n_views == 0 && ggml_are_same_layout(node, parent)) {
if (ggml_is_view(parent)) {
struct ggml_tensor * view_src = parent->view_src;
- struct hash_node * view_src_hn = hash_get(ht, view_src);
+ struct hash_node * view_src_hn = hash_get(galloc, view_src);
if (view_src_hn->n_views == 1 && view_src_hn->n_children == 0 && view_src->data == parent->data) {
// TODO: the offset of the view parent must be kept to ensure that the op doesn't overwrite
// the parent's data that it will need later (same layout requirement). the problem is that then
// we cannot free the tensor because the original address of the allocation is lost
// adding a view_src pointer to the tensor would solve this and simplify the code dealing with views
// for now, we only reuse the parent's data if the offset is zero (view_src->data == parent->data)
AT_PRINTF("reusing view parent %s (%s) for %s\n", parent->name, view_src->name, node->name);
- node->data = parent->data;
+ node->view_src = view_src;
+ view_src_hn->n_views += 1;
+ init_view(galloc, node);
return;
}
}
else {
AT_PRINTF("reusing parent %s for %s\n", parent->name, node->name);
- node->data = parent->data;
+ node->view_src = parent;
+ p_hn->n_views += 1;
+ init_view(galloc, node);
return;
}
}
}
}
- ggml_allocr_alloc(alloc, node);
+ ggml_tallocr_alloc(alloc, node);
}
}
}
-static size_t ggml_allocr_alloc_graph_tensors_n(
- struct ggml_allocr * alloc,
- struct ggml_cgraph ** graphs, int n_graphs,
- struct ggml_tensor *** inputs, struct ggml_tensor *** outputs) {
+static void free_node(ggml_gallocr_t galloc, struct ggml_tensor * node) {
+ ggml_tallocr_t alloc = node_tallocr(galloc, node);
- // reset hash table
- struct hash_node * ht = alloc->hash_table;
- memset(ht, 0, sizeof(struct hash_node) * GGML_GRAPH_HASHTABLE_SIZE);
+ ggml_tallocr_free_tensor(alloc, node);
+}
+
+static void ggml_tallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgraph * gf) {
+ const int * parse_seq = galloc->parse_seq;
+ int parse_seq_len = galloc->parse_seq_len;
// count number of children and views
- for (int g = 0; g < n_graphs; g++) {
- struct ggml_cgraph * gf = graphs[g];
- for (int i = 0; i < gf->n_nodes; i++) {
- struct ggml_tensor * node = gf->nodes[i];
+ for (int i = 0; i < gf->n_nodes; i++) {
+ struct ggml_tensor * node = gf->nodes[i];
+
+ if (ggml_is_view(node)) {
+ struct ggml_tensor * view_src = node->view_src;
+ hash_get(galloc, view_src)->n_views += 1;
+ if (node->buffer == NULL && node->data != NULL) {
+ // view of a pre-allocated tensor, didn't call init_view() yet
+ init_view(galloc, node);
+ }
+ }
- if (ggml_is_view(node)) {
- struct ggml_tensor * view_src = node->view_src;
- hash_get(ht, view_src)->n_views += 1;
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ break;
}
+ hash_get(galloc, parent)->n_children += 1;
+ if (ggml_is_view(parent) && parent->buffer == NULL && parent->data != NULL) {
+ init_view(galloc, parent);
+ }
+ }
+ }
+ // allocate tensors
+ // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers
+ int last_barrier_pos = 0;
+ int n_nodes = parse_seq_len ? parse_seq_len : gf->n_nodes;
+
+ for (int ind = 0; ind < n_nodes; ind++) {
+ // allocate a node if there is no parse_seq or this is not a barrier
+ if (parse_seq_len == 0 || parse_seq[ind] != -1) {
+ int i = parse_seq_len ? parse_seq[ind] : ind;
+ struct ggml_tensor * node = gf->nodes[i];
+
+ // allocate parents (leafs)
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * parent = node->src[j];
if (parent == NULL) {
break;
}
- hash_get(ht, parent)->n_children += 1;
+ allocate_node(galloc, parent);
}
- }
- }
- // allocate tensors
- for (int g = 0; g < n_graphs; g++) {
- struct ggml_cgraph * gf = graphs[g];
- AT_PRINTF("####### graph %d/%d\n", g, n_graphs);
- // graph inputs are allocated first to ensure that they are not overwritten by each other
- if (inputs != NULL && inputs[g] != NULL) {
- for (int i = 0; inputs[g][i] != NULL; i++) {
- struct ggml_tensor * input = inputs[g][i];
- AT_PRINTF("input: %s\n", input->name);
- allocate_node(alloc, input);
+ // allocate node
+ allocate_node(galloc, node);
+
+ AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name);
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * parent = node->src[j];
+ if (parent == NULL) {
+ break;
+ }
+ AT_PRINTF("%s", parent->name);
+ if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
+ AT_PRINTF(", ");
+ }
}
+ AT_PRINTF("\n");
}
- // if we have parse_seq then we allocate nodes following the list, and we only free nodes at barriers
- int last_barrier_pos = 0;
- int n_nodes = alloc->parse_seq_len ? alloc->parse_seq_len : gf->n_nodes;
- for (int ind = 0; ind < n_nodes; ind++) {
- // allocate a node if there is no parse_seq or this is not a barrier
- if ((alloc->parse_seq_len==0) || alloc->parse_seq[ind] != -1) {
- int i = alloc->parse_seq_len ? alloc->parse_seq[ind] : ind;
- struct ggml_tensor * node = gf->nodes[i];
+ // update parents
+ // update immediately if there is no parse_seq
+ // update only at barriers if there is parse_seq
+ if ((parse_seq_len == 0) || parse_seq[ind] == -1) {
+ int update_start = parse_seq_len ? last_barrier_pos : ind;
+ int update_end = parse_seq_len ? ind : ind + 1;
+ for (int i = update_start; i < update_end; i++) {
+ int node_i = parse_seq_len ? parse_seq[i] : i;
+ struct ggml_tensor * node = gf->nodes[node_i];
- // allocate parents (leafs)
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * parent = node->src[j];
if (parent == NULL) {
break;
}
- allocate_node(alloc, parent);
- }
+ struct hash_node * p_hn = hash_get(galloc, parent);
+ p_hn->n_children -= 1;
- // allocate node
- allocate_node(alloc, node);
+ //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
- AT_PRINTF("exec: %s (%s) <= ", ggml_op_name(node->op), node->name);
- for (int j = 0; j < GGML_MAX_SRC; j++) {
- struct ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- AT_PRINTF("%s", parent->name);
- if (j < GGML_MAX_SRC - 1 && node->src[j + 1] != NULL) {
- AT_PRINTF(", ");
- }
- }
- AT_PRINTF("\n");
- }
-
- // update parents
- // update immediately if there is no parse_seq
- // update only at barriers if there is parse_seq
- if ((alloc->parse_seq_len == 0) || alloc->parse_seq[ind] == -1) {
- int update_start = alloc->parse_seq_len ? last_barrier_pos : ind;
- int update_end = alloc->parse_seq_len ? ind : ind + 1;
- for (int i = update_start; i < update_end; i++) {
- int node_i = alloc->parse_seq_len ? alloc->parse_seq[i] : i;
- struct ggml_tensor * node = gf->nodes[node_i];
-
- for (int j = 0; j < GGML_MAX_SRC; j++) {
- struct ggml_tensor * parent = node->src[j];
- if (parent == NULL) {
- break;
- }
- struct hash_node * p_hn = hash_get(ht, parent);
- p_hn->n_children -= 1;
-
- //AT_PRINTF("parent %s: %d children, %d views\n", parent->name, parent->n_children, parent->n_views);
-
- if (p_hn->n_children == 0 && p_hn->n_views == 0) {
- if (ggml_is_view(parent)) {
- struct ggml_tensor * view_src = parent->view_src;
- struct hash_node * view_src_hn = hash_get(ht, view_src);
- view_src_hn->n_views -= 1;
- AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
- if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src->data != node->data) {
- ggml_allocr_free_tensor(alloc, view_src);
- }
- }
- else {
- if (parent->data != node->data) {
- ggml_allocr_free_tensor(alloc, parent);
- }
+ if (p_hn->n_children == 0 && p_hn->n_views == 0) {
+ if (ggml_is_view(parent)) {
+ struct ggml_tensor * view_src = parent->view_src;
+ struct hash_node * view_src_hn = hash_get(galloc, view_src);
+ view_src_hn->n_views -= 1;
+ AT_PRINTF("view_src %s: %d children, %d views\n", view_src->name, view_src_hn->n_children, view_src_hn->n_views);
+ if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0) {
+ free_node(galloc, view_src);
}
}
+ else {
+ free_node(galloc, parent);
+ }
}
}
- AT_PRINTF("\n");
- if (alloc->parse_seq_len) {
- last_barrier_pos = ind + 1;
- }
}
- }
- // free graph outputs here that wouldn't be freed otherwise because they have no children
- if (outputs != NULL && outputs[g] != NULL) {
- for (int i = 0; outputs[g][i] != NULL; i++) {
- struct ggml_tensor * output = outputs[g][i];
- AT_PRINTF("output: %s\n", output->name);
- ggml_allocr_free_tensor(alloc, output);
+ AT_PRINTF("\n");
+ if (parse_seq_len) {
+ last_barrier_pos = ind + 1;
}
}
}
+}
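// Editorial note on parse_seq: it is a flat list of node indices in which -1
// acts as a barrier. For example, {0, 2, 1, -1, 3, -1} allocates nodes 0, 2
// and 1, releases their dead parents at the first barrier, then allocates
// node 3 and sweeps again. Without a parse_seq, each node is followed by an
// implicit barrier covering just itself, so dead parents are freed immediately.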
- return alloc->max_size;
+size_t ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, ggml_tallocr_t talloc, struct ggml_cgraph * graph) {
+ size_t hash_size = graph->visited_hash_table.size;
+
+ // check if the hash table is initialized and large enough
+ if (galloc->hash_set.size < hash_size) {
+ if (galloc->hash_set.keys != NULL) {
+ free(galloc->hash_set.keys);
+ }
+ if (galloc->hash_values != NULL) {
+ free(galloc->hash_values);
+ }
+ galloc->hash_set.keys = malloc(sizeof(struct ggml_tensor *) * hash_size);
+ galloc->hash_set.size = hash_size;
+ galloc->hash_values = malloc(sizeof(struct hash_node) * hash_size);
+ }
+
+ // reset hash table
+ memset(galloc->hash_set.keys, 0, sizeof(struct ggml_tensor *) * hash_size);
+ memset(galloc->hash_values, 0, sizeof(struct hash_node) * hash_size);
+
+ galloc->talloc = talloc;
+ ggml_tallocr_alloc_graph_impl(galloc, graph);
+ galloc->talloc = NULL;
+
+ size_t max_size = ggml_tallocr_max_size(talloc);
+
+ return max_size;
+}
+
+void ggml_gallocr_alloc_graph_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, struct ggml_hash_set hash_set, ggml_tallocr_t * hash_node_talloc) {
+ const size_t hash_size = hash_set.size;
+
+ GGML_ASSERT(hash_size >= (size_t)(graph->n_nodes + graph->n_leafs));
+
+ galloc->talloc = NULL;
+
+ // alloc hash_values if needed
+ if (galloc->hash_values == NULL || galloc->hash_values_size < hash_size) {
+ free(galloc->hash_values);
+ galloc->hash_values = malloc(sizeof(struct hash_node) * hash_size);
+ galloc->hash_values_size = hash_size;
+ }
+
+ // free hash_set.keys if needed
+ if (galloc->hash_set.keys != NULL) {
+ free(galloc->hash_set.keys);
+ }
+ galloc->hash_set = hash_set;
+
+ // reset hash values
+ memset(galloc->hash_values, 0, sizeof(struct hash_node) * hash_size);
+
+ galloc->hash_allocs = hash_node_talloc;
+
+ ggml_tallocr_alloc_graph_impl(galloc, graph);
+
+ // remove unowned resources
+ galloc->hash_set.keys = NULL;
+ galloc->hash_allocs = NULL;
+}
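// Editorial note: in this multi-backend path the caller owns both the hash set
// (with keys typically pre-populated for every tensor in the graph) and the
// parallel hash_node_talloc array mapping each key slot to the tensor
// allocator that should own that tensor. galloc only borrows them, which is
// why hash_set.keys and hash_allocs are cleared rather than freed on exit.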
+
+// legacy API wrapper
+
+struct ggml_allocr {
+ ggml_tallocr_t talloc;
+ ggml_gallocr_t galloc;
+};
+
+static ggml_allocr_t ggml_allocr_new_impl(ggml_tallocr_t talloc) {
+ ggml_allocr_t alloc = (ggml_allocr_t)malloc(sizeof(struct ggml_allocr));
+ *alloc = (struct ggml_allocr) {
+ /*.talloc = */ talloc,
+ /*.galloc = */ ggml_gallocr_new(),
+ };
+ return alloc;
+}
+
+ggml_allocr_t ggml_allocr_new(void * data, size_t size, size_t alignment) {
+ return ggml_allocr_new_impl(ggml_tallocr_new(data, size, alignment));
+}
+
+ggml_allocr_t ggml_allocr_new_measure(size_t alignment) {
+ return ggml_allocr_new_impl(ggml_tallocr_new_measure(alignment));
+}
+
+ggml_allocr_t ggml_allocr_new_from_buffer(struct ggml_backend_buffer * buffer) {
+ return ggml_allocr_new_impl(ggml_tallocr_new_from_buffer(buffer));
+}
+
+ggml_allocr_t ggml_allocr_new_from_backend(struct ggml_backend * backend, size_t size) {
+ return ggml_allocr_new_impl(ggml_tallocr_new_from_backend(backend, size));
+}
+
+ggml_allocr_t ggml_allocr_new_measure_from_backend(struct ggml_backend * backend) {
+ return ggml_allocr_new_impl(ggml_tallocr_new_measure_from_backend(backend));
+}
+
+struct ggml_backend_buffer * ggml_allocr_get_buffer(ggml_allocr_t alloc) {
+ return ggml_tallocr_get_buffer(alloc->talloc);
+}
+
+void ggml_allocr_set_parse_seq(ggml_allocr_t alloc, const int * list, int n) {
+ ggml_gallocr_set_parse_seq(alloc->galloc, list, n);
+}
+
+void ggml_allocr_free(ggml_allocr_t alloc) {
+ ggml_gallocr_free(alloc->galloc);
+ ggml_tallocr_free(alloc->talloc);
+ free(alloc);
+}
+
+bool ggml_allocr_is_measure(ggml_allocr_t alloc) {
+ return ggml_tallocr_is_measure(alloc->talloc);
+}
+
+void ggml_allocr_reset(ggml_allocr_t alloc) {
+ ggml_tallocr_reset(alloc->talloc);
+}
+
+void ggml_allocr_alloc(ggml_allocr_t alloc, struct ggml_tensor * tensor) {
+ ggml_tallocr_alloc(alloc->talloc, tensor);
+}
+
+size_t ggml_allocr_max_size(ggml_allocr_t alloc) {
+ return ggml_tallocr_max_size(alloc->talloc);
}
-size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph) {
- return ggml_allocr_alloc_graph_tensors_n(alloc, &graph, 1, NULL, NULL);
+size_t ggml_allocr_alloc_graph(ggml_allocr_t alloc, struct ggml_cgraph * graph) {
+ return ggml_gallocr_alloc_graph(alloc->galloc, alloc->talloc, graph);
}
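// Usage sketch (editorial, illustrative only): the legacy API is typically
// driven in two passes. A measure pass computes the worst-case size, then a
// real buffer of that size is allocated and the graph is rebuilt against it.
// build_graph() and the alignment value are hypothetical stand-ins.

struct ggml_cgraph * build_graph(void); // hypothetical graph builder

static void example_two_pass_alloc(void) {
    const size_t align = 32; // hypothetical alignment

    // measure pass: assigns placeholder addresses, records the high-water mark
    ggml_allocr_t measure = ggml_allocr_new_measure(align);
    struct ggml_cgraph * gf = build_graph();
    const size_t mem_size = ggml_allocr_alloc_graph(measure, gf) + align;
    ggml_allocr_free(measure);

    // real pass: rebuild the graph (the measure pass left fake pointers in the
    // old one), then allocate it inside an owned buffer of the measured size
    void * buf = malloc(mem_size);
    ggml_allocr_t alloc = ggml_allocr_new(buf, mem_size, align);
    gf = build_graph();
    ggml_allocr_alloc_graph(alloc, gf);
}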
extern "C" {
#endif
+struct ggml_backend;
+struct ggml_backend_buffer;
-GGML_API struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment);
-GGML_API struct ggml_allocr * ggml_allocr_new_measure(size_t alignment);
+//
+// Legacy API
+//
+
+typedef struct ggml_allocr * ggml_allocr_t;
+
+// initialize allocator for use with CPU backend only
+GGML_API ggml_allocr_t ggml_allocr_new(void * data, size_t size, size_t alignment);
+GGML_API ggml_allocr_t ggml_allocr_new_measure(size_t alignment);
+
+// initialize allocator for use with ggml-backend
+GGML_API ggml_allocr_t ggml_allocr_new_from_buffer(struct ggml_backend_buffer * buffer);
+GGML_API ggml_allocr_t ggml_allocr_new_from_backend(struct ggml_backend * backend, size_t size); // allocates an owned buffer
+GGML_API ggml_allocr_t ggml_allocr_new_measure_from_backend(struct ggml_backend * backend);
+
+GGML_API struct ggml_backend_buffer * ggml_allocr_get_buffer(ggml_allocr_t alloc);
// tell the allocator to parse nodes following the order described in the list
// you should call this if your graph is optimized to execute out-of-order
-GGML_API void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, const int * list, int n);
+GGML_API void ggml_allocr_set_parse_seq(ggml_allocr_t alloc, const int * list, int n);
+
+GGML_API void ggml_allocr_free (ggml_allocr_t alloc);
+GGML_API bool ggml_allocr_is_measure (ggml_allocr_t alloc);
+GGML_API void ggml_allocr_reset (ggml_allocr_t alloc);
+GGML_API void ggml_allocr_alloc (ggml_allocr_t alloc, struct ggml_tensor * tensor);
+GGML_API size_t ggml_allocr_max_size (ggml_allocr_t alloc);
+
+GGML_API size_t ggml_allocr_alloc_graph(ggml_allocr_t alloc, struct ggml_cgraph * graph);
+
+//
+// ggml-backend v2 API
+//
+
+// Separate tensor and graph allocator objects
+// This is necessary for multi-backend allocation because the graph allocator needs to use multiple tensor allocators
+// The original API is kept as a wrapper around the new API
+
+// Tensor allocator
+typedef struct ggml_tallocr * ggml_tallocr_t;
+
+GGML_API ggml_tallocr_t ggml_tallocr_new(void * data, size_t size, size_t alignment);
+GGML_API ggml_tallocr_t ggml_tallocr_new_measure(size_t alignment);
+GGML_API ggml_tallocr_t ggml_tallocr_new_from_buffer(struct ggml_backend_buffer * buffer);
+GGML_API ggml_tallocr_t ggml_tallocr_new_from_backend(struct ggml_backend * backend, size_t size); // allocates an owned buffer
+GGML_API ggml_tallocr_t ggml_tallocr_new_measure_from_backend(struct ggml_backend * backend);
+
+GGML_API struct ggml_backend_buffer * ggml_tallocr_get_buffer(ggml_tallocr_t talloc);
+
+GGML_API void ggml_tallocr_free (ggml_tallocr_t talloc);
+GGML_API bool ggml_tallocr_is_measure (ggml_tallocr_t talloc);
+GGML_API void ggml_tallocr_reset (ggml_tallocr_t talloc);
+GGML_API void ggml_tallocr_alloc (ggml_tallocr_t talloc, struct ggml_tensor * tensor);
+GGML_API size_t ggml_tallocr_max_size (ggml_tallocr_t talloc);
+
+
+// Graph allocator
+typedef struct ggml_gallocr * ggml_gallocr_t;
+
+GGML_API ggml_gallocr_t ggml_gallocr_new(void);
+GGML_API void ggml_gallocr_free(ggml_gallocr_t galloc);
-GGML_API void ggml_allocr_free(struct ggml_allocr * alloc);
-GGML_API bool ggml_allocr_is_measure(struct ggml_allocr * alloc);
-GGML_API void ggml_allocr_reset(struct ggml_allocr * alloc);
-GGML_API void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor);
-GGML_API size_t ggml_allocr_alloc_graph(struct ggml_allocr * alloc, struct ggml_cgraph * graph);
+GGML_API void ggml_gallocr_set_parse_seq(ggml_gallocr_t galloc, const int * list, int n);
+GGML_API size_t ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, ggml_tallocr_t talloc, struct ggml_cgraph * graph);
+// Allocate tensors from the allocators given by the hash table
+GGML_API void ggml_gallocr_alloc_graph_n(
+ ggml_gallocr_t galloc,
+ struct ggml_cgraph * graph,
+ struct ggml_hash_set hash_set,
+ ggml_tallocr_t * hash_node_talloc);
#ifdef __cplusplus
}
--- /dev/null
+#pragma once
+
+// ggml-backend internal header
+
+#include "ggml-backend.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ //
+ // Backend buffer
+ //
+
+ typedef void * ggml_backend_buffer_context_t;
+
+ struct ggml_backend_buffer_i {
+ void (*free_buffer) (ggml_backend_buffer_t buffer);
+ void * (*get_base) (ggml_backend_buffer_t buffer); // get base pointer
+ size_t (*get_alloc_size)(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-allocation callback
+ void (*init_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // post-allocation callback
+ void (*free_tensor) (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor); // pre-free callback
+ };
+
+ struct ggml_backend_buffer {
+ struct ggml_backend_buffer_i iface;
+
+ ggml_backend_t backend;
+ ggml_backend_buffer_context_t context;
+
+ size_t size;
+ };
+
+ GGML_API ggml_backend_buffer_t ggml_backend_buffer_init(
+ struct ggml_backend * backend,
+ struct ggml_backend_buffer_i iface,
+ ggml_backend_buffer_context_t context,
+ size_t size);
+
+ //
+ // Backend
+ //
+
+ typedef void * ggml_backend_context_t;
+
+ struct ggml_backend_i {
+ const char * (*get_name)(ggml_backend_t backend);
+
+ void (*free)(ggml_backend_t backend);
+
+ // buffer allocation
+ ggml_backend_buffer_t (*alloc_buffer)(ggml_backend_t backend, size_t size);
+
+ // get buffer alignment
+ size_t (*get_alignment)(ggml_backend_t backend);
+
+ // tensor data access
+ // these functions can be asynchronous, helper functions are provided for synchronous access that automatically call synchronize
+ void (*set_tensor_async)(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ void (*get_tensor_async)(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+ void (*synchronize) (ggml_backend_t backend);
+
+ // (optional) copy tensor between different backends, allowing single-copy transfers
+ void (*cpy_tensor_from)(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
+ void (*cpy_tensor_to) (ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst);
+
+ // compute graph with a plan
+ ggml_backend_graph_plan_t (*graph_plan_create) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+ void (*graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+ void (*graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+
+ // compute graph without a plan
+ void (*graph_compute)(ggml_backend_t backend, struct ggml_cgraph * cgraph);
+
+ // check if the backend supports an operation
+ bool (*supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
+ };
+
+ struct ggml_backend {
+ struct ggml_backend_i iface;
+
+ ggml_backend_context_t context;
+ };
+
+#ifdef __cplusplus
+}
+#endif
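
To illustrate the interface above: a hypothetical backend could wrap a plain heap allocation as follows (a sketch; `my_backend` is an assumed ggml_backend_t, and note that get_base is the only mandatory callback, since ggml_backend_buffer_init asserts it):

    static void my_buffer_free_buffer(ggml_backend_buffer_t buffer) {
        free(buffer->context); // context holds the raw allocation
    }

    static void * my_buffer_get_base(ggml_backend_buffer_t buffer) {
        return buffer->context;
    }

    static struct ggml_backend_buffer_i my_buffer_i = {
        /* .free_buffer    = */ my_buffer_free_buffer,
        /* .get_base       = */ my_buffer_get_base,
        /* .get_alloc_size = */ NULL, // optional, defaults to ggml_nbytes
        /* .init_tensor    = */ NULL, // optional post-allocation hook
        /* .free_tensor    = */ NULL, // optional pre-free hook
    };

    ggml_backend_buffer_t buf = ggml_backend_buffer_init(my_backend, my_buffer_i, malloc(size), size);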
--- /dev/null
+#include "ggml-backend-impl.h"
+#include "ggml-alloc.h"
+#include "ggml-impl.h"
+
+#include <assert.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define UNUSED GGML_UNUSED
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+// backend buffer
+
+ggml_backend_buffer_t ggml_backend_buffer_init(
+ struct ggml_backend * backend,
+ struct ggml_backend_buffer_i iface,
+ ggml_backend_buffer_context_t context,
+ size_t size) {
+ ggml_backend_buffer_t buffer = malloc(sizeof(struct ggml_backend_buffer));
+
+ GGML_ASSERT(iface.get_base != NULL);
+
+ (*buffer) = (struct ggml_backend_buffer) {
+ /* .interface = */ iface,
+ /* .backend = */ backend,
+ /* .context = */ context,
+ /* .size = */ size,
+ };
+
+ return buffer;
+}
+
+void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
+ if (buffer == NULL) {
+ return;
+ }
+
+ if (buffer->iface.free_buffer != NULL) {
+ buffer->iface.free_buffer(buffer);
+ }
+ free(buffer);
+}
+
+size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) {
+ return ggml_backend_get_alignment(buffer->backend);
+}
+
+size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) {
+ return buffer->size;
+}
+
+void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) {
+ void * base = buffer->iface.get_base(buffer);
+
+ GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL");
+
+ return base;
+}
+
+size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
+ // get_alloc_size is optional, defaults to ggml_nbytes
+ if (buffer->iface.get_alloc_size) {
+ return buffer->iface.get_alloc_size(buffer, tensor);
+ }
+ return ggml_nbytes(tensor);
+}
+
+void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
+ // init_tensor is optional
+ if (buffer->iface.init_tensor) {
+ buffer->iface.init_tensor(buffer, tensor);
+ }
+}
+
+void ggml_backend_buffer_free_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
+ // free_tensor is optional
+ if (buffer->iface.free_tensor) {
+ buffer->iface.free_tensor(buffer, tensor);
+ }
+}
+
+// backend
+
+ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor) {
+ return tensor->buffer ? tensor->buffer->backend : NULL;
+}
+
+const char * ggml_backend_name(ggml_backend_t backend) {
+ if (backend == NULL) {
+ return "NULL";
+ }
+ return backend->iface.get_name(backend);
+}
+
+void ggml_backend_free(ggml_backend_t backend) {
+ if (backend == NULL) {
+ return;
+ }
+
+ backend->iface.free(backend);
+}
+
+ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) {
+ return backend->iface.alloc_buffer(backend, size);
+}
+
+size_t ggml_backend_get_alignment(ggml_backend_t backend) {
+ return backend->iface.get_alignment(backend);
+}
+
+void ggml_backend_tensor_set_async(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ ggml_get_backend(tensor)->iface.set_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size);
+}
+
+void ggml_backend_tensor_get_async(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ ggml_get_backend(tensor)->iface.get_tensor_async(ggml_get_backend(tensor), tensor, data, offset, size);
+}
+
+void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ ggml_backend_t backend = ggml_get_backend(tensor);
+
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+ GGML_ASSERT(backend != NULL && "tensor backend not set");
+
+ backend->iface.set_tensor_async(backend, tensor, data, offset, size);
+ backend->iface.synchronize(backend);
+}
+
+void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ ggml_backend_t backend = ggml_get_backend(tensor);
+
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+ GGML_ASSERT(backend != NULL && "tensor backend not set");
+
+ backend->iface.get_tensor_async(backend, tensor, data, offset, size);
+ backend->iface.synchronize(backend);
+}
+
+void ggml_backend_synchronize(ggml_backend_t backend) {
+ backend->iface.synchronize(backend);
+}
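
The synchronous helpers above are just the async calls followed by synchronize, so the two forms below are equivalent (a sketch; `t` is an assumed tensor already allocated in a backend buffer):

    float src[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
    float dst[4];

    // synchronous helpers
    ggml_backend_tensor_set(t, src, 0, sizeof(src));
    ggml_backend_tensor_get(t, dst, 0, sizeof(dst));

    // explicit async form
    ggml_backend_tensor_set_async(t, src, 0, sizeof(src));
    ggml_backend_synchronize(ggml_get_backend(t));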
+
+ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ return backend->iface.graph_plan_create(backend, cgraph);
+}
+
+void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+ backend->iface.graph_plan_free(backend, plan);
+}
+
+void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+ backend->iface.graph_plan_compute(backend, plan);
+}
+
+void ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ backend->iface.graph_compute(backend, cgraph);
+}
+
+bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
+ return backend->iface.supports_op(backend, op);
+}
+
+// backend copy
+
+static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
+ if (a->type != b->type) {
+ return false;
+ }
+ for (int i = 0; i < GGML_MAX_DIMS; i++) {
+ if (a->ne[i] != b->ne[i]) {
+ return false;
+ }
+ if (a->nb[i] != b->nb[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) {
+ //printf("src: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", src->name, (int)src->ne[0], (int)src->ne[1], (int)src->ne[2], (int)src->ne[3], (int)src->nb[0], (int)src->nb[1], (int)src->nb[2], (int)src->nb[3]);
+ //printf("dst: %s ne: [%d %d %d %d] nb: [%d %d %d %d]\n", dst->name, (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], (int)dst->nb[0], (int)dst->nb[1], (int)dst->nb[2], (int)dst->nb[3]);
+ GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");
+
+ // fprintf(stderr, "cpy tensor %s from %s to %s (%lu bytes)\n", src->name, ggml_backend_name(src->backend), ggml_backend_name(dst->backend), ggml_nbytes(src));
+
+ if (src == dst) {
+ return;
+ }
+
+ // TODO: allow backends to support copy to/from same backend
+
+    if (ggml_get_backend(dst)->iface.cpy_tensor_from != NULL) {
+        ggml_get_backend(dst)->iface.cpy_tensor_from(ggml_get_backend(dst), src, dst);
+    } else if (ggml_get_backend(src)->iface.cpy_tensor_to != NULL) {
+        ggml_get_backend(src)->iface.cpy_tensor_to(ggml_get_backend(src), src, dst);
+ } else {
+ // shouldn't be hit when copying from/to CPU
+ #ifndef NDEBUG
+ fprintf(stderr, "ggml_backend_tensor_copy: neither cpy_tensor_from nor cpy_tensor_to are implemented for backends %s and %s, falling back to get/set\n", ggml_backend_name(src->buffer->backend), ggml_backend_name(dst->buffer->backend));
+ #endif
+ size_t nbytes = ggml_nbytes(src);
+ void * data = malloc(nbytes);
+ ggml_backend_tensor_get(src, data, 0, nbytes);
+ ggml_backend_tensor_set(dst, data, 0, nbytes);
+ free(data);
+ }
+}
+
+// backend CPU
+
+struct ggml_backend_cpu_context {
+ int n_threads;
+ void * work_data;
+ size_t work_size;
+};
+
+static const char * ggml_backend_cpu_name(ggml_backend_t backend) {
+ return "CPU";
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_free(ggml_backend_t backend) {
+ struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
+ free(cpu_ctx->work_data);
+ free(cpu_ctx);
+ free(backend);
+}
+
+static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
+ return (void *)buffer->context;
+}
+
+static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+    free(buffer->context);
+}
+
+static struct ggml_backend_buffer_i cpu_backend_buffer_i = {
+ /* .free_buffer = */ ggml_backend_cpu_buffer_free_buffer,
+ /* .get_base = */ ggml_backend_cpu_buffer_get_base,
+ /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
+ /* .init_tensor = */ NULL, // no initialization required
+ /* .free_tensor = */ NULL, // no cleanup required
+};
+
+// for buffers from ptr, free is not called
+static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = {
+ /* .free_buffer = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
+ /* .get_base = */ ggml_backend_cpu_buffer_get_base,
+ /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
+ /* .init_tensor = */ NULL,
+ /* .free_tensor = */ NULL,
+};
+
+static const size_t TENSOR_ALIGNMENT = 64; // should be enough for AVX 512
+
+static ggml_backend_buffer_t ggml_backend_cpu_alloc_buffer(ggml_backend_t backend, size_t size) {
+ size += TENSOR_ALIGNMENT; // malloc may return an address that is not aligned
+ void * data = malloc(size); // TODO: maybe use GGML_ALIGNED_MALLOC?
+
+ GGML_ASSERT(data != NULL && "failed to allocate buffer");
+
+ return ggml_backend_buffer_init(backend, cpu_backend_buffer_i, data, size);
+}
+
+static size_t ggml_backend_cpu_get_alignment(ggml_backend_t backend) {
+ return TENSOR_ALIGNMENT;
+ UNUSED(backend);
+}
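
A note on the over-allocation above: malloc only guarantees alignment suitable for standard types (typically 16 bytes), so the extra TENSOR_ALIGNMENT bytes leave room to round the first tensor address up to a 64-byte boundary (the rounding itself is assumed to happen in ggml-alloc, not in this file). The round-up is the usual power-of-two trick:

    // round addr up to the next multiple of TENSOR_ALIGNMENT (a power of two)
    uintptr_t aligned = ((uintptr_t) addr + TENSOR_ALIGNMENT - 1) & ~(uintptr_t) (TENSOR_ALIGNMENT - 1);
    // e.g. addr = 0x1008 -> aligned = 0x1040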
+
+static void ggml_backend_cpu_set_tensor_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+
+ memcpy((char *)tensor->data + offset, data, size);
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_get_tensor_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+
+ memcpy(data, (const char *)tensor->data + offset, size);
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_synchronize(ggml_backend_t backend) {
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_cpy_tensor_from(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) {
+ ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_cpy_tensor_to(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) {
+ ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
+
+ UNUSED(backend);
+}
+
+struct ggml_backend_plan_cpu {
+ struct ggml_cplan cplan;
+ struct ggml_cgraph cgraph;
+};
+
+static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
+
+ struct ggml_backend_plan_cpu * cpu_plan = malloc(sizeof(struct ggml_backend_plan_cpu));
+
+ cpu_plan->cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads);
+ cpu_plan->cgraph = *cgraph;
+
+ if (cpu_plan->cplan.work_size > 0) {
+ cpu_plan->cplan.work_data = malloc(cpu_plan->cplan.work_size);
+ }
+
+ return cpu_plan;
+}
+
+static void ggml_backend_cpu_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+ struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;
+
+ free(cpu_plan->cplan.work_data);
+ free(cpu_plan);
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+ struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;
+
+ ggml_graph_compute(&cpu_plan->cgraph, &cpu_plan->cplan);
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
+
+ struct ggml_cplan cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads);
+
+ if (cpu_ctx->work_size < cplan.work_size) {
+ // TODO: may be faster to free and use malloc to avoid the copy
+ cpu_ctx->work_data = realloc(cpu_ctx->work_data, cplan.work_size);
+ cpu_ctx->work_size = cplan.work_size;
+ }
+
+ cplan.work_data = cpu_ctx->work_data;
+
+ ggml_graph_compute(cgraph, &cplan);
+}
+
+static bool ggml_backend_cpu_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
+ return true;
+ UNUSED(backend);
+ UNUSED(op);
+}
+
+static struct ggml_backend_i cpu_backend_i = {
+ /* .get_name = */ ggml_backend_cpu_name,
+ /* .free = */ ggml_backend_cpu_free,
+ /* .alloc_buffer = */ ggml_backend_cpu_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_cpu_get_alignment,
+ /* .set_tensor_async = */ ggml_backend_cpu_set_tensor_async,
+ /* .get_tensor_async = */ ggml_backend_cpu_get_tensor_async,
+ /* .synchronize = */ ggml_backend_cpu_synchronize,
+ /* .cpy_tensor_from = */ ggml_backend_cpu_cpy_tensor_from,
+ /* .cpy_tensor_to = */ ggml_backend_cpu_cpy_tensor_to,
+ /* .graph_plan_create = */ ggml_backend_cpu_graph_plan_create,
+ /* .graph_plan_free = */ ggml_backend_cpu_graph_plan_free,
+ /* .graph_plan_compute = */ ggml_backend_cpu_graph_plan_compute,
+ /* .graph_compute = */ ggml_backend_cpu_graph_compute,
+ /* .supports_op = */ ggml_backend_cpu_supports_op,
+};
+
+ggml_backend_t ggml_backend_cpu_init(void) {
+ struct ggml_backend_cpu_context * ctx = malloc(sizeof(struct ggml_backend_cpu_context));
+
+ ctx->n_threads = GGML_DEFAULT_N_THREADS;
+ ctx->work_data = NULL;
+ ctx->work_size = 0;
+
+ ggml_backend_t cpu_backend = malloc(sizeof(struct ggml_backend));
+
+ *cpu_backend = (struct ggml_backend) {
+ /* .interface = */ cpu_backend_i,
+ /* .context = */ ctx
+ };
+ return cpu_backend;
+}
+
+bool ggml_backend_is_cpu(ggml_backend_t backend) {
+ return backend->iface.get_name == ggml_backend_cpu_name;
+}
+
+void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) {
+ GGML_ASSERT(ggml_backend_is_cpu(backend_cpu));
+
+ struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *)backend_cpu->context;
+ ctx->n_threads = n_threads;
+}
+
+ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size) {
+ return ggml_backend_buffer_init(backend_cpu, cpu_backend_buffer_i_from_ptr, ptr, size);
+}
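
A typical use of the from-ptr variant is wrapping memory the caller already owns, such as a memory-mapped model file (a sketch; `cpu`, `mapped_ptr` and `mapped_size` are assumed):

    // free_buffer is NULL in cpu_backend_buffer_i_from_ptr, so freeing the buffer
    // releases only the wrapper, never mapped_ptr itself
    ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr(cpu, mapped_ptr, mapped_size);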
+
+// scheduler
+
+#define GGML_MAX_BACKENDS 4
+#define GGML_MAX_SPLITS 256
+#define GGML_MAX_SPLIT_INPUTS 16
+
+struct ggml_backend_sched_split {
+ ggml_tallocr_t tallocr;
+ int i_start;
+ int i_end;
+ struct ggml_tensor * inputs[GGML_MAX_SPLIT_INPUTS];
+ int n_inputs;
+ struct ggml_cgraph * graph;
+};
+
+struct ggml_backend_sched {
+ int n_backends;
+ ggml_backend_t backends[GGML_MAX_BACKENDS];
+ ggml_tallocr_t tallocs[GGML_MAX_BACKENDS];
+
+ ggml_gallocr_t galloc;
+
+ struct ggml_hash_set hash_set;
+ ggml_tallocr_t * node_talloc; // [hash_set.size]
+ struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // [hash_set.size][GGML_MAX_BACKENDS]
+
+ struct ggml_cgraph * graph;
+ struct ggml_backend_sched_split splits[GGML_MAX_SPLITS];
+ int n_splits;
+
+ struct ggml_context * ctx;
+
+ // align context_buffer to GGML_MEM_ALIGN
+ #ifdef _MSC_VER
+ __declspec(align(GGML_MEM_ALIGN))
+ #else
+ __attribute__((aligned(GGML_MEM_ALIGN)))
+ #endif
+ char context_buffer[GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS*sizeof(struct ggml_tensor) + GGML_MAX_SPLITS*sizeof(struct ggml_cgraph)];
+};
+
+#define hash_id(node) ggml_hash_find_or_insert(sched->hash_set, node)
+#define node_allocr(node) sched->node_talloc[hash_id(node)]
+
+static bool ggml_is_view_op(enum ggml_op op) {
+ return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
+}
+
+// returns the priority of the backend, lower is better
+static int sched_backend_prio(ggml_backend_sched_t sched, ggml_backend_t backend) {
+ for (int i = 0; i < sched->n_backends; i++) {
+ if (sched->backends[i] == backend) {
+ return i;
+ }
+ }
+ return INT_MAX;
+}
+
+static int sched_allocr_prio(ggml_backend_sched_t sched, ggml_tallocr_t allocr) {
+ for (int i = 0; i < sched->n_backends; i++) {
+ if (sched->tallocs[i] == allocr) {
+ return i;
+ }
+ }
+ return INT_MAX;
+}
+
+// returns the backend that should be used for the node based on the current locations
+char causes[GGML_DEFAULT_GRAPH_SIZE*4 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug, remove
+static ggml_backend_t sched_backend_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * node) {
+ // if the dst tensor is already allocated in a buffer, we must assume that it is critical to keep it there
+ // ie. kv cache updates
+ // note that this doesn't allow fallback to CPU. need to add output tensors to the splits to copy the data back to the original backend.
+ // dst
+ ggml_backend_t cur_backend = ggml_get_backend(node);
+ if (cur_backend != NULL) {
+ sprintf(causes[hash_id(node)], "1.dst");
+ return cur_backend;
+ }
+
+ // view_src
+ if (node->view_src != NULL && ggml_get_backend(node->view_src) != NULL) {
+ sprintf(causes[hash_id(node)], "1.vsrc");
+ return ggml_get_backend(node->view_src);
+ }
+
+ // src
+ int cur_prio = INT_MAX;
+ size_t cur_size = 0;
+
+ for (int i = 0; i < GGML_MAX_SRC; i++) {
+ const struct ggml_tensor * src = node->src[i];
+ if (src == NULL) {
+ break;
+ }
+ ggml_backend_t src_backend = ggml_get_backend(src);
+ if (src_backend != NULL) {
+ int src_prio = sched_backend_prio(sched, src_backend);
+ size_t src_size = ggml_nbytes(src);
+ if (src_prio < cur_prio && src_size >= cur_size) {
+ cur_prio = src_prio;
+ cur_size = src_size;
+ cur_backend = src_backend;
+ sprintf(causes[hash_id(node)], "1.src%d", i);
+ }
+ }
+ }
+ return cur_backend;
+}
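
A worked example of the source heuristic above, assuming the backends were passed in the order {GPU, CPU}, so the GPU has priority 0:

    //   node->src[0] = bias   on CPU,   1 KB -> cur = CPU (first source with a backend)
    //   node->src[1] = weight on GPU, 100 MB -> prio 0 < 1 and 100 MB >= 1 KB -> cur = GPU
    // the node is assigned to the GPU: a source replaces the current choice only when
    // it has both a better (lower) priority and at least as much data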
+
+static char * fmt_size(size_t size) {
+ static char buffer[128];
+ if (size >= 1024*1024) {
+ sprintf(buffer, "%zuM", size/1024/1024);
+ } else {
+ sprintf(buffer, "%zuK", size/1024);
+ }
+ return buffer;
+}
+
+static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
+ int cur_split = 0;
+ for (int i = 0; i < graph->n_nodes; i++) {
+ if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
+ ggml_backend_t split_backend = ggml_tallocr_get_buffer(sched->splits[cur_split].tallocr)->backend;
+ fprintf(stderr, "\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend), sched->splits[cur_split].n_inputs);
+ for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
+ fprintf(stderr, "[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name, fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
+ }
+ fprintf(stderr, "\n");
+ cur_split++;
+ }
+ struct ggml_tensor * node = graph->nodes[i];
+ if (ggml_is_view_op(node->op)) {
+ continue;
+ }
+ ggml_tallocr_t node_allocr = node_allocr(node);
+ ggml_backend_t node_backend = node_allocr ? ggml_tallocr_get_buffer(node_allocr)->backend : NULL;
+ fprintf(stderr, "node #%3d (%10.10s): %20.20s (%4.4s) [%4.4s %8.8s]:", i, ggml_op_name(node->op), node->name, fmt_size(ggml_nbytes(node)), node_allocr ? ggml_backend_name(node_backend) : "NULL", causes[hash_id(node)]);
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ ggml_tallocr_t src_allocr = node_allocr(src);
+ ggml_backend_t src_backend = src_allocr ? ggml_tallocr_get_buffer(src_allocr)->backend : NULL;
+ fprintf(stderr, " %20.20s (%4.4s) [%4.4s %8.8s]", src->name, fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", causes[hash_id(src)]);
+ }
+ fprintf(stderr, "\n");
+ }
+}
+
+// creates a copy of the tensor with the same memory layout
+static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
+ struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
+ for (int i = 0; i < GGML_MAX_DIMS; i++) {
+ dup->nb[i] = tensor->nb[i];
+ }
+ return dup;
+}
+
+// assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
+// TODO: merge passes
+static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
+ // reset state
+ size_t hash_size = sched->hash_set.size;
+ memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size);
+ memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size);
+ memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size);
+ sched->n_splits = 0;
+
+ struct ggml_init_params params = {
+ /*.mem_size = */ sizeof(sched->context_buffer),
+ /*.mem_buffer = */ sched->context_buffer,
+ /*.no_alloc = */ true
+ };
+
+ if (sched->ctx != NULL) {
+ ggml_free(sched->ctx);
+ }
+
+ sched->ctx = ggml_init(params);
+
+ // pass 1: assign backends to ops with allocated inputs
+ for (int i = 0; i < graph->n_leafs; i++) {
+ struct ggml_tensor * leaf = graph->leafs[i];
+ if (node_allocr(leaf) != NULL) {
+ // do not overwrite user assignments
+ continue;
+ }
+ ggml_backend_t leaf_backend = ggml_get_backend(leaf);
+ if (leaf_backend == NULL && leaf->view_src != NULL) {
+ leaf_backend = ggml_get_backend(leaf->view_src);
+ }
+ if (leaf_backend != NULL) {
+ node_allocr(leaf) = ggml_backend_sched_get_tallocr(sched, leaf_backend);
+ }
+ }
+
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ if (node_allocr(node) != NULL) {
+ // do not overwrite user assignments
+ continue;
+ }
+ ggml_backend_t node_backend = sched_backend_from_cur(sched, node);
+ if (node_backend != NULL) {
+ node_allocr(node) = ggml_backend_sched_get_tallocr(sched, node_backend);
+ }
+ }
+ //printf("PASS 1 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+
+ // pass 2: assign backends to ops from current assignments
+ // TODO:
+ // - reuse sched_backend_from_cur
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ ggml_tallocr_t node_allocr = node_allocr(node);
+ if (node_allocr == NULL) {
+ int cur_prio = INT_MAX;
+ size_t cur_size = 0;
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ ggml_tallocr_t src_allocr = node_allocr(src);
+ if (src_allocr != NULL) {
+ int src_prio = sched_allocr_prio(sched, src_allocr);
+ size_t src_size = ggml_nbytes(src);
+ if (src_prio < cur_prio && src_size >= cur_size) {
+ cur_prio = src_prio;
+ cur_size = src_size;
+ node_allocr = src_allocr;
+ sprintf(causes[hash_id(node)], "2.src%d", j);
+ }
+ }
+ }
+ if (node_allocr != NULL) {
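+                // note: node_allocr(node) expands to the hash-table slot via the macro above;
+                // the bare node_allocr on the right is the local variable selected in this loop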
+ node_allocr(node) = node_allocr;
+ }
+ }
+ }
+ //printf("PASS 2 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+
+ // pass 3: assign backends to remaining src from dst (should only be leafs)
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ ggml_tallocr_t node_allocr = node_allocr(node);
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ ggml_tallocr_t src_allocr = node_allocr(src);
+ if (src_allocr == NULL) {
+ node_allocr(src) = node_allocr;
+ }
+ }
+ }
+ //printf("PASS 3 ASSIGNMENTS\n"); sched_print_assignments(sched, graph);
+
+ // pass 4: split graph, find tensors that need to be copied
+ // TODO:
+ // - when switching from a less preferred backend to a more preferred backend, check if it is possible to move the switch to an earlier point for the same cost
+ // find first backend
+ int cur_split = 0;
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ if (node->view_src == NULL) {
+ sched->splits[0].tallocr = node_allocr(node);
+ break;
+ }
+ }
+ sched->splits[0].i_start = 0;
+ sched->splits[0].n_inputs = 0;
+ memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK
+ ggml_tallocr_t cur_allocr = sched->splits[0].tallocr;
+ size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr);
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+
+ if (ggml_is_view_op(node->op)) {
+ continue;
+ }
+
+ ggml_tallocr_t node_allocr = node_allocr(node);
+
+ if (node_allocr != cur_allocr) {
+ sched->splits[cur_split].i_end = i;
+ cur_split++;
+ GGML_ASSERT(cur_split < GGML_MAX_SPLITS);
+ sched->splits[cur_split].tallocr = node_allocr;
+ sched->splits[cur_split].i_start = i;
+ sched->splits[cur_split].n_inputs = 0;
+ memset(sched->splits[cur_split].inputs, 0, sizeof(sched->splits[cur_split].inputs)); //HACK
+ cur_allocr = node_allocr;
+ cur_backend_id = sched_allocr_prio(sched, cur_allocr);
+ }
+
+ // find inputs that are not on the same backend
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ ggml_tallocr_t src_allocr = node_allocr(src);
+ if (src_allocr != node_allocr) {
+ int n_inputs = sched->splits[cur_split].n_inputs++;
+ GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS);
+ sched->splits[cur_split].inputs[n_inputs] = (struct ggml_tensor *)src;
+
+ // create copies
+ size_t id = hash_id(src);
+ if (sched->node_copies[id][cur_backend_id] == NULL) {
+ struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
+ sched->node_copies[id][cur_backend_id] = tensor_copy;
+ node_allocr(tensor_copy) = cur_allocr;
+ ggml_backend_t backend = ggml_tallocr_get_buffer(cur_allocr)->backend;
+ ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name);
+ }
+ node->src[j] = sched->node_copies[id][cur_backend_id];
+ }
+ }
+ }
+ sched->splits[cur_split].i_end = graph->n_nodes;
+ sched->n_splits = cur_split + 1;
+
+ //fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); sched_print_assignments(sched, graph); fflush(stdout);
+
+#if 1
+ // sanity check: all sources should have the same backend as the node
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ ggml_tallocr_t node_allocr = node_allocr(node);
+ if (node_allocr == NULL) {
+ fprintf(stderr, "!!!!!!! %s has no backend\n", node->name);
+ }
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ break;
+ }
+ ggml_tallocr_t src_allocr = node_allocr(src);
+ if (src_allocr != node_allocr /* && src_backend != NULL */) { // ignore nulls for now
+ fprintf(stderr, "!!!! %s has backend %s, src %d (%s) has backend %s\n",
+ node->name, node_allocr ? ggml_backend_name(ggml_tallocr_get_buffer(node_allocr)->backend) : "NULL",
+ j, src->name, src_allocr ? ggml_backend_name(ggml_tallocr_get_buffer(src_allocr)->backend) : "NULL");
+ }
+ }
+ }
+#endif
+
+ // create copies of the graph for each split
+ // FIXME: avoid this copy, pass split inputs to ggml_gallocr_alloc_graph_n in some other way
+ struct ggml_cgraph * graph_copy = ggml_new_graph_custom(sched->ctx, graph->n_nodes + sched->n_splits*GGML_MAX_SPLIT_INPUTS, false);
+ for (int i = 0; i < sched->n_splits; i++) {
+ struct ggml_backend_sched_split * split = &sched->splits[i];
+ split->graph = ggml_graph_view(sched->ctx, graph, split->i_start, split->i_end);
+
+ // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
+ for (int j = 0; j < split->n_inputs; j++) {
+ struct ggml_tensor * input = split->inputs[j];
+ struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_allocr_prio(sched, split->tallocr)];
+ input_cpy->src[0] = input;
+ graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
+ }
+
+ for (int j = split->i_start; j < split->i_end; j++) {
+ graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j];
+ }
+ }
+ sched->graph = graph_copy;
+}
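
A small trace of the four passes, assuming backends {GPU, CPU} and a graph whose weights live on the GPU while one bias tensor was pre-allocated on the CPU:

    // pass 1: leafs/nodes with already-allocated data get a backend from it
    //         (mul_mat over GPU weights -> GPU; add over the CPU bias -> CPU)
    // pass 2: still-unassigned nodes inherit the best-priority assigned source
    // pass 3: remaining unassigned sources (leafs) inherit their consumer's backend
    // pass 4: consecutive nodes on the same allocator become one split,
    //         e.g. [GPU: mul_mat ...][CPU: add][GPU: ...]; each split records its
    //         foreign inputs, a copy is created per input, and the nodes are
    //         rewired to read from the copies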
+
+static void sched_alloc_splits(ggml_backend_sched_t sched) {
+ ggml_gallocr_alloc_graph_n(
+ sched->galloc,
+ sched->graph,
+ sched->hash_set,
+ sched->node_talloc);
+}
+
+static void sched_compute_splits(ggml_backend_sched_t sched) {
+ uint64_t copy_us[GGML_MAX_BACKENDS] = {0};
+ uint64_t compute_us[GGML_MAX_BACKENDS] = {0};
+
+ struct ggml_backend_sched_split * splits = sched->splits;
+
+ for (int i = 0; i < sched->n_splits; i++) {
+ struct ggml_backend_sched_split * split = &splits[i];
+ ggml_backend_t split_backend = ggml_tallocr_get_buffer(split->tallocr)->backend;
+ int split_backend_id = sched_backend_prio(sched, split_backend);
+
+ // copy the input tensors to the split backend
+ uint64_t copy_start_us = ggml_time_us();
+ for (int j = 0; j < split->n_inputs; j++) {
+ struct ggml_tensor * input_cpy = sched->node_copies[hash_id(split->inputs[j])][sched_backend_prio(sched, split_backend)];
+ if (split->inputs[j]->buffer == NULL) {
+ if (split->inputs[j]->view_src == NULL) {
+ fprintf(stderr, "input %s has no buffer and no view_src\n", split->inputs[j]->name);
+ exit(1);
+ }
+ struct ggml_tensor * view = split->inputs[j];
+ view->backend = view->view_src->backend;
+ view->buffer = view->view_src->buffer;
+ view->data = (char *)view->view_src->data + view->view_offs;
+ ggml_backend_buffer_init_tensor(ggml_backend_sched_get_buffer(sched, view->buffer->backend), view);
+ }
+ if (input_cpy->buffer == NULL) {
+ fprintf(stderr, "input_cpy %s has no buffer\n", input_cpy->name);
+ exit(1);
+ }
+ GGML_ASSERT(split->inputs[j]->buffer->backend != input_cpy->buffer->backend);
+ GGML_ASSERT(input_cpy->buffer->backend == split_backend);
+ ggml_backend_tensor_copy(split->inputs[j], input_cpy);
+ }
+ // ggml_backend_synchronize(split_backend);
+ int64_t copy_end_us = ggml_time_us();
+ copy_us[split_backend_id] += copy_end_us - copy_start_us;
+
+#if 0
+ char split_filename[GGML_MAX_NAME];
+ snprintf(split_filename, GGML_MAX_NAME, "split_%i_%s.dot", i, ggml_backend_name(split_backend));
+ ggml_graph_dump_dot(split->graph, NULL, split_filename);
+#endif
+
+ uint64_t compute_start_us = ggml_time_us();
+ ggml_backend_graph_compute(split_backend, split->graph);
+ // ggml_backend_synchronize(split_backend);
+ uint64_t compute_end_us = ggml_time_us();
+ compute_us[split_backend_id] += compute_end_us - compute_start_us;
+ }
+
+#if 0
+ // per-backend timings
+ fprintf(stderr, "sched_compute_splits times (%d splits):\n", sched->n_splits);
+ for (int i = 0; i < sched->n_backends; i++) {
+ if (copy_us[i] > 0 || compute_us[i] > 0) {
+ fprintf(stderr, "\t%5.5s: %lu us copy, %lu us compute\n", ggml_backend_name(sched->backends[i]), copy_us[i], compute_us[i]);
+ }
+ }
+#endif
+}
+
+static void sched_reset(ggml_backend_sched_t sched) {
+ for (int i = 0; i < sched->n_backends; i++) {
+ ggml_tallocr_reset(sched->tallocs[i]);
+ }
+}
+
+ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends) {
+ GGML_ASSERT(n_backends <= GGML_MAX_BACKENDS);
+
+ struct ggml_backend_sched * sched = malloc(sizeof(struct ggml_backend_sched));
+ memset(sched, 0, sizeof(struct ggml_backend_sched));
+
+ fprintf(stderr, "ggml_backend_sched size: %lu KB\n", sizeof(struct ggml_backend_sched)/1024);
+
+ sched->n_backends = n_backends;
+ for (int i = 0; i < n_backends; i++) {
+ sched->backends[i] = backends[i];
+ }
+
+ sched->galloc = ggml_gallocr_new();
+
+ // init measure allocs for each backend
+ for (int i = 0; i < n_backends; i++) {
+ sched->tallocs[i] = ggml_tallocr_new_measure_from_backend(backends[i]);
+ }
+
+ return sched;
+}
+
+void ggml_backend_sched_free(ggml_backend_sched_t sched) {
+ if (sched == NULL) {
+ return;
+ }
+ for (int i = 0; i < sched->n_backends; i++) {
+ ggml_tallocr_free(sched->tallocs[i]);
+ }
+ ggml_gallocr_free(sched->galloc);
+ free(sched->hash_set.keys);
+ free(sched->node_talloc);
+ free(sched->node_copies);
+ free(sched);
+}
+
+void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
+ // initialize hash tables
+ size_t hash_size = measure_graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS;
+ sched->hash_set.size = hash_size;
+ sched->hash_set.keys = malloc(sizeof(sched->hash_set.keys[0]) * hash_size);
+ sched->node_talloc = malloc(sizeof(sched->node_talloc[0]) * hash_size);
+ sched->node_copies = malloc(sizeof(sched->node_copies[0]) * hash_size);
+
+ sched_split_graph(sched, measure_graph);
+ sched_alloc_splits(sched);
+
+ // allocate buffers and reset allocators
+ for (int i = 0; i < sched->n_backends; i++) {
+ size_t size = ggml_tallocr_max_size(sched->tallocs[i]);
+ ggml_tallocr_free(sched->tallocs[i]);
+ sched->tallocs[i] = ggml_tallocr_new_from_backend(sched->backends[i], size);
+ }
+
+ sched_reset(sched);
+}
+
+void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
+ GGML_ASSERT(sched->hash_set.size >= graph->visited_hash_table.size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
+
+ sched_split_graph(sched, graph);
+ sched_alloc_splits(sched);
+ sched_compute_splits(sched);
+ sched_reset(sched);
+}
+
+ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend) {
+ int backend_index = sched_backend_prio(sched, backend);
+ return sched->tallocs[backend_index];
+}
+
+ggml_backend_buffer_t ggml_backend_sched_get_buffer(ggml_backend_sched_t sched, ggml_backend_t backend) {
+ int backend_index = sched_backend_prio(sched, backend);
+ return ggml_tallocr_get_buffer(sched->tallocs[backend_index]);
+}
+
+void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) {
+ int backend_index = sched_backend_prio(sched, backend);
+ GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
+ node_allocr(node) = sched->tallocs[backend_index];
+}
--- /dev/null
+#pragma once
+
+#include "ggml.h"
+#include "ggml-alloc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ //
+ // Backend buffer
+ //
+
+ struct ggml_backend_buffer;
+ typedef struct ggml_backend_buffer * ggml_backend_buffer_t;
+
+ // backend buffer functions
+ GGML_API void ggml_backend_buffer_free (ggml_backend_buffer_t buffer);
+ GGML_API size_t ggml_backend_buffer_get_alignment (ggml_backend_buffer_t buffer);
+ GGML_API void * ggml_backend_buffer_get_base (ggml_backend_buffer_t buffer);
+ GGML_API size_t ggml_backend_buffer_get_size (ggml_backend_buffer_t buffer);
+ GGML_API size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+ GGML_API void ggml_backend_buffer_init_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+ GGML_API void ggml_backend_buffer_free_tensor (ggml_backend_buffer_t buffer, struct ggml_tensor * tensor);
+
+ //
+ // Backend
+ //
+
+ struct ggml_backend;
+ typedef struct ggml_backend * ggml_backend_t;
+ typedef void * ggml_backend_graph_plan_t;
+
+ GGML_API ggml_backend_t ggml_get_backend(const struct ggml_tensor * tensor);
+
+ GGML_API const char * ggml_backend_name(ggml_backend_t backend);
+ GGML_API void ggml_backend_free(ggml_backend_t backend);
+
+ GGML_API ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size);
+
+ GGML_API size_t ggml_backend_get_alignment(ggml_backend_t backend);
+
+ GGML_API void ggml_backend_tensor_set_async( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ GGML_API void ggml_backend_tensor_get_async(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+
+ GGML_API void ggml_backend_tensor_set( struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+ GGML_API void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+
+ GGML_API void ggml_backend_synchronize(ggml_backend_t backend);
+
+ GGML_API ggml_backend_graph_plan_t ggml_backend_graph_plan_create (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+
+ GGML_API void ggml_backend_graph_plan_free (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+ GGML_API void ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+ GGML_API void ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph);
+ GGML_API bool ggml_backend_supports_op (ggml_backend_t backend, const struct ggml_tensor * op);
+
+ // tensor copy between different backends
+ GGML_API void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst);
+
+ //
+ // CPU backend
+ //
+
+ GGML_API ggml_backend_t ggml_backend_cpu_init(void);
+
+ GGML_API bool ggml_backend_is_cpu(ggml_backend_t backend);
+ GGML_API void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads);
+
+ // Create a backend buffer from an existing pointer
+ GGML_API ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(ggml_backend_t backend_cpu, void * ptr, size_t size);
+
+
+ //
+ // Backend scheduler
+ //
+
+ // The backend scheduler allows for multiple backends to be used together
+ // Handles compute buffer allocation, assignment of tensors to backends, and copying of tensors between backends
+ // The backends are selected based on:
+ // - the backend that supports the operation
+ // - the location of the pre-allocated tensors (e.g. the weights)
+ /*
+ Example usage:
+
+ sched = ggml_backend_sched_new({backend_gpu, backend_gpu2, backend_cpu}, num_backends);
+ // sched is initialized with measure allocators and cannot be used until allocated with a measure graph
+
+ // initialize buffers from a measure graph
+ measure_graph = build_graph(sched); // use the allocr to allocate inputs as needed
+
+ // in build_graph:
+ build_graph(...) {
+ // allocating tensors in a specific backend (optional, recommended: pre-allocate inputs in a different buffer)
+        alloc_cpu = ggml_backend_sched_get_tallocr(sched, backend_cpu);
+        ggml_tallocr_alloc(alloc_cpu, tensor);
+
+ // manually assigning nodes to a backend (optional, shouldn't be needed in most cases)
+ struct ggml_tensor * node = ggml_mul_mat(ctx, ...);
+ ggml_backend_sched_set_node_backend(sched, node, backend_gpu);
+ }
+
+ // allocate backend buffers from measure graph
+ ggml_backend_sched_init_measure(sched, measure_graph);
+
+ // the scheduler is now ready to compute graphs
+
+ // compute
+ graph = build_graph(sched);
+ ggml_backend_sched_graph_compute(sched, graph);
+ */
+
+ struct ggml_backend_sched;
+ typedef struct ggml_backend_sched * ggml_backend_sched_t;
+
+ // Initialize a backend scheduler
+ GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, int n_backends);
+
+ GGML_API void ggml_backend_sched_free(ggml_backend_sched_t sched);
+
+ // Initialize backend buffers from a measure graph
+ GGML_API void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
+
+ GGML_API ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend);
+ GGML_API ggml_backend_buffer_t ggml_backend_sched_get_buffer (ggml_backend_sched_t sched, ggml_backend_t backend);
+
+ GGML_API void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
+
+    // Allocate and compute a graph on the backend scheduler
+ GGML_API void ggml_backend_sched_graph_compute(
+ ggml_backend_sched_t sched,
+ struct ggml_cgraph * graph);
+
+#ifdef __cplusplus
+}
+#endif
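
Putting the header together, a minimal end-to-end use of the CPU backend (a sketch: `ctx`, the input tensors `a`/`b` and the host arrays `a_data`/`b_data`/`c_data` are assumed to be set up with the core ggml API, and the graph tensors are assumed to be allocated in a backend buffer, e.g. via ggml-alloc, before the set/compute calls):

    ggml_backend_t backend = ggml_backend_cpu_init();
    ggml_backend_cpu_set_n_threads(backend, 4);

    struct ggml_tensor * c  = ggml_mul_mat(ctx, a, b);
    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, c);

    ggml_backend_tensor_set(a, a_data, 0, ggml_nbytes(a));
    ggml_backend_tensor_set(b, b_data, 0, ggml_nbytes(b));
    ggml_backend_graph_compute(backend, gf);
    ggml_backend_tensor_get(c, c_data, 0, ggml_nbytes(c));

    ggml_backend_free(backend);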
+#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#ifdef __HIP_PLATFORM_AMD__
// for rocblas_initialize()
#include "rocblas/rocblas.h"
-#endif
+#endif // __HIP_PLATFORM_AMD__
+#define CUBLAS_COMPUTE_16F HIPBLAS_R_16F
#define CUBLAS_COMPUTE_32F HIPBLAS_R_32F
#define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F
#define CUBLAS_GEMM_DEFAULT HIPBLAS_GEMM_DEFAULT
+#define CUBLAS_GEMM_DEFAULT_TENSOR_OP HIPBLAS_GEMM_DEFAULT
#define CUBLAS_OP_N HIPBLAS_OP_N
#define CUBLAS_OP_T HIPBLAS_OP_T
#define CUBLAS_STATUS_SUCCESS HIPBLAS_STATUS_SUCCESS
#define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width)
#define cublasCreate hipblasCreate
#define cublasGemmEx hipblasGemmEx
+#define cublasGemmBatchedEx hipblasGemmBatchedEx
+#define cublasGemmStridedBatchedEx hipblasGemmStridedBatchedEx
#define cublasHandle_t hipblasHandle_t
#define cublasSetMathMode(handle, mode) CUBLAS_STATUS_SUCCESS
#define cublasSetStream hipblasSetStream
#define cublasSgemm hipblasSgemm
#define cublasStatus_t hipblasStatus_t
+#define cudaDeviceCanAccessPeer hipDeviceCanAccessPeer
+#define cudaDeviceDisablePeerAccess hipDeviceDisablePeerAccess
+#define cudaDeviceEnablePeerAccess hipDeviceEnablePeerAccess
#define cudaDeviceProp hipDeviceProp_t
#define cudaDeviceSynchronize hipDeviceSynchronize
#define cudaError_t hipError_t
#define cudaMemcpyHostToDevice hipMemcpyHostToDevice
#define cudaMemcpyKind hipMemcpyKind
#define cudaMemset hipMemset
+#define cudaMemsetAsync hipMemsetAsync
#define cudaOccupancyMaxPotentialBlockSize hipOccupancyMaxPotentialBlockSize
#define cudaSetDevice hipSetDevice
#define cudaStreamCreateWithFlags hipStreamCreateWithFlags
#define cudaStreamNonBlocking hipStreamNonBlocking
#define cudaStreamSynchronize hipStreamSynchronize
-#define cudaStreamWaitEvent(stream, event) hipStreamWaitEvent(stream, event, 0)
+#define cudaStreamWaitEvent(stream, event, flags) hipStreamWaitEvent(stream, event, flags)
#define cudaStream_t hipStream_t
#define cudaSuccess hipSuccess
#else
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda_fp16.h>
-#endif
+#endif // defined(GGML_USE_HIPBLAS)
#include "ggml-cuda.h"
#include "ggml.h"
-
-#define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
-#ifndef CC_TURING
-#define CC_TURING 700
+#include "ggml-backend-impl.h"
+
+#define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products
+#define CC_VOLTA 700
+#define CC_OFFSET_AMD 1000000
+#define CC_RDNA2 (CC_OFFSET_AMD + 1030)
+
+// define this if you want to always fall back to MMQ kernels and not use cuBLAS for matrix multiplication
+// on modern hardware, using cuBLAS is recommended as it utilizes F16 tensor cores which are very performant
+// for large computational tasks. the drawback is that this requires some extra amount of VRAM:
+// - 7B quantum model: +100-200 MB
+// - 13B quantum model: +200-400 MB
+//
+//#define GGML_CUDA_FORCE_MMQ
+
+// TODO: improve this to be correct for more hardware
+// for example, currently fails for GeForce GTX 1660 which is TURING arch (> VOLTA) but does not have tensor cores
+// probably other such cases, and not sure what happens on AMD hardware
+#if !defined(GGML_CUDA_FORCE_MMQ)
+#define CUDA_USE_TENSOR_CORES
#endif
+// max batch size to use MMQ kernels when tensor cores are available
+#define MMQ_MAX_BATCH_SIZE 32
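
The net effect of these defines, sketched as a predicate (an illustration only; the real dispatch lives further down in this file and also takes the device's compute capability into account):

    // sketch: with tensor cores compiled in, cuBLAS handles large batches
    // while the quantized MMQ kernels keep handling small ones
    static bool use_mmq_sketch(const int batch_size) {
    #if defined(CUDA_USE_TENSOR_CORES)
        return batch_size <= MMQ_MAX_BATCH_SIZE;
    #else
        return true; // GGML_CUDA_FORCE_MMQ: always use the MMQ kernels
    #endif
    }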
+
#if defined(GGML_USE_HIPBLAS)
#define __CUDA_ARCH__ 1300
+#if defined(__gfx1100__) || defined(__gfx1101__) || defined(__gfx1102__) || defined(__gfx1103__) || \
+ defined(__gfx1150__) || defined(__gfx1151__)
+#define RDNA3
+#endif
+
+#if defined(__gfx1030__) || defined(__gfx1031__) || defined(__gfx1032__) || defined(__gfx1033__) || \
+ defined(__gfx1034__) || defined(__gfx1035__) || defined(__gfx1036__) || defined(__gfx1037__)
+#define RDNA2
+#endif
+
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
#endif
return c;
}
-#endif
+#endif // defined(GGML_USE_HIPBLAS)
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
do { \
cudaError_t err_ = (err); \
if (err_ != cudaSuccess) { \
- fprintf(stderr, "CUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \
+ int dev_id; \
+ cudaGetDevice(&dev_id); \
+ fprintf(stderr, "\nCUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \
cudaGetErrorString(err_)); \
+ fprintf(stderr, "current device: %d\n", dev_id); \
exit(1); \
} \
} while (0)
do { \
cublasStatus_t err_ = (err); \
if (err_ != CUBLAS_STATUS_SUCCESS) { \
+ int dev_id; \
+ cudaGetDevice(&dev_id); \
fprintf(stderr, "\ncuBLAS error %d at %s:%d: %s\n", \
err_, __FILE__, __LINE__, cublasGetStatusString(err_)); \
+ fprintf(stderr, "current device: %d\n", dev_id); \
exit(1); \
} \
} while (0)
do { \
cublasStatus_t err_ = (err); \
if (err_ != CUBLAS_STATUS_SUCCESS) { \
+ int id; \
+ cudaGetDevice(&id); \
fprintf(stderr, "\ncuBLAS error %d at %s:%d\n", err_, __FILE__, __LINE__); \
+ fprintf(stderr, "current device: %d\n", id); \
exit(1); \
} \
} while (0)
#endif // CUDART_VERSION >= 11
+#if CUDART_VERSION >= 11100
+#define GGML_CUDA_ASSUME(x) __builtin_assume(x)
+#else
+#define GGML_CUDA_ASSUME(x)
+#endif // CUDART_VERSION >= 11100
+
#ifdef GGML_CUDA_F16
typedef half dfloat; // dequantize float
typedef half2 dfloat2;
return *((int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
}
+template<typename T>
+using to_t_cuda_t = void (*)(const void * __restrict__ x, T * __restrict__ y, int k, cudaStream_t stream);
+typedef to_t_cuda_t<float> to_fp32_cuda_t;
+typedef to_t_cuda_t<half> to_fp16_cuda_t;
+
typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v);
-typedef void (*to_fp32_cuda_t)(const void * __restrict__ x, float * __restrict__ y, int k, cudaStream_t stream);
typedef void (*dot_kernel_k_t)(const void * __restrict__ vx, const int ib, const int iqs, const float * __restrict__ y, float & v);
typedef void (*cpy_kernel_t)(const char * cx, char * cdst);
typedef void (*ggml_cuda_func_t)(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);
-typedef void (*ggml_cuda_op_t)(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i, float * src0_ddf_i,
- float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main);
+typedef void (*ggml_cuda_op_mul_mat_t)(
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
+ const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
+ const int64_t src1_padded_row_size, const cudaStream_t & stream);
+typedef void (*ggml_cuda_op_flatten_t)(
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream);
// QK = number of values after dequantization
// QR = QK / number of values before dequantization
#define CUDA_SILU_BLOCK_SIZE 256
#define CUDA_CPY_BLOCK_SIZE 32
#define CUDA_SCALE_BLOCK_SIZE 256
+#define CUDA_CLAMP_BLOCK_SIZE 256
#define CUDA_ROPE_BLOCK_SIZE 256
#define CUDA_ALIBI_BLOCK_SIZE 32
#define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32
#define CUDA_QUANTIZE_BLOCK_SIZE 256
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256
+#define CUDA_GET_ROWS_BLOCK_SIZE 256
// dmmv = dequantize_mul_mat_vec
#ifndef GGML_CUDA_DMMV_X
static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
#endif
+#ifndef GGML_CUDA_PEER_MAX_BATCH_SIZE
+#define GGML_CUDA_PEER_MAX_BATCH_SIZE 128
+#endif // GGML_CUDA_PEER_MAX_BATCH_SIZE
+
+#define MUL_MAT_SRC1_COL_STRIDE 128
+
+#define MAX_STREAMS 8
+static cudaStream_t g_cudaStreams[GGML_CUDA_MAX_DEVICES][MAX_STREAMS] = { nullptr };
+static cudaMemPool_t g_cudaMemPools[GGML_CUDA_MAX_DEVICES] = { nullptr };
+
struct ggml_tensor_extra_gpu {
void * data_device[GGML_CUDA_MAX_DEVICES]; // 1 pointer for each device for split tensors
- cudaEvent_t events[GGML_CUDA_MAX_DEVICES]; // events for synchronizing multiple GPUs
+ cudaEvent_t events[GGML_CUDA_MAX_DEVICES][MAX_STREAMS]; // events for synchronizing multiple GPUs
};
+// this is faster on Windows
+// probably because the Windows CUDA libraries forget to make this check before invoking the drivers
+inline cudaError_t ggml_cuda_set_device(const int device) {
+ int current_device;
+    CUDA_CHECK(cudaGetDevice(&current_device));
+
+ if (device == current_device) {
+ return cudaSuccess;
+ }
+
+ return cudaSetDevice(device);
+}
+
static int g_device_count = -1;
static int g_main_device = 0;
static int g_compute_capabilities[GGML_CUDA_MAX_DEVICES];
static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};
-static bool g_mul_mat_q = true;
static void * g_scratch_buffer = nullptr;
-static size_t g_scratch_size = 1024*1024*1024; // 1 GB by default
+static size_t g_scratch_size = 0; // disabled by default
static size_t g_scratch_offset = 0;
static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};
-static cudaStream_t g_cudaStreams_main[GGML_CUDA_MAX_DEVICES] = { nullptr };
-
static __global__ void add_f32(const float * x, const float * y, float * dst, const int kx, const int ky) {
const int i = blockDim.x*blockIdx.x + threadIdx.x;
dst[i] = __hadd(x[i], __float2half(y[i]));
}
+static __global__ void add_f16_f32_f32(const half * x, const float * y, float * dst, const int k) {
+ const int i = blockDim.x*blockIdx.x + threadIdx.x;
+
+ if (i >= k) {
+ return;
+ }
+ dst[i] = __half2float(x[i]) + y[i];
+}
+
static __global__ void mul_f32(const float * x, const float * y, float * dst, const int kx, const int ky) {
const int i = blockDim.x*blockIdx.x + threadIdx.x;
//================================== k-quants
-static __global__ void dequantize_block_q2_K(const void * __restrict__ vx, float * __restrict__ yy) {
+template<typename dst_t>
+static __global__ void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const int i = blockIdx.x;
const block_q2_K * x = (const block_q2_K *) vx;
const int is = 8*n + l/16;
const uint8_t q = x[i].qs[32*n + l];
- float * y = yy + i*QK_K + 128*n;
+ dst_t * y = yy + i*QK_K + 128*n;
float dall = __low2half(x[i].dm);
float dmin = __high2half(x[i].dm);
const int is = tid/16; // 0 or 1
const int il = tid%16; // 0...15
const uint8_t q = x[i].qs[il] >> (2*is);
- float * y = yy + i*QK_K + 16*is + il;
+ dst_t * y = yy + i*QK_K + 16*is + il;
float dall = __low2half(x[i].dm);
float dmin = __high2half(x[i].dm);
y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
}
-static __global__ void dequantize_block_q3_K(const void * __restrict__ vx, float * __restrict__ yy) {
+template<typename dst_t>
+static __global__ void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const int i = blockIdx.x;
const block_q3_K * x = (const block_q3_K *) vx;
float d_all = x[i].d;
float dl = d_all * (us - 32);
- float * y = yy + i*QK_K + 128*n + 32*j;
+ dst_t * y = yy + i*QK_K + 128*n + 32*j;
const uint8_t * q = x[i].qs + 32*n;
const uint8_t * hm = x[i].hmask;
const int im = il/8; // 0...1
const int in = il%8; // 0...7
- float * y = yy + i*QK_K + 16*is + il;
+ dst_t * y = yy + i*QK_K + 16*is + il;
const uint8_t q = x[i].qs[il] >> (2*is);
const uint8_t h = x[i].hmask[in] >> (2*is + im);
}
#endif
-static __global__ void dequantize_block_q4_K(const void * __restrict__ vx, float * __restrict__ yy) {
+template<typename dst_t>
+static __global__ void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const block_q4_K * x = (const block_q4_K *) vx;
const int i = blockIdx.x;
const int is = 2*il;
const int n = 4;
- float * y = yy + i*QK_K + 64*il + n*ir;
+ dst_t * y = yy + i*QK_K + 64*il + n*ir;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
#else
const int tid = threadIdx.x;
const uint8_t * q = x[i].qs;
- float * y = yy + i*QK_K;
+ dst_t * y = yy + i*QK_K;
const float d = (float)x[i].dm[0];
const float m = (float)x[i].dm[1];
y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4);
#endif
}
-static __global__ void dequantize_block_q5_K(const void * __restrict__ vx, float * __restrict__ yy) {
+template<typename dst_t>
+static __global__ void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const block_q5_K * x = (const block_q5_K *) vx;
const int i = blockIdx.x;
const int ir = tid%16; // ir is in 0...15
const int is = 2*il; // is is in 0...6
- float * y = yy + i*QK_K + 64*il + 2*ir;
+ dst_t * y = yy + i*QK_K + 64*il + 2*ir;
const float dall = __low2half(x[i].dm);
const float dmin = __high2half(x[i].dm);
const int is = tid/16; // 0 or 1
const uint8_t h = x[i].qh[in] >> im;
const float d = x[i].d;
- float * y = yy + i*QK_K + tid;
+ dst_t * y = yy + i*QK_K + tid;
y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16));
y[32] = d * x[i].scales[is+2] * ((q >> 4) - ((h >> 4) & 1 ? 0 : 16));
#endif
}
-static __global__ void dequantize_block_q6_K(const void * __restrict__ vx, float * __restrict__ yy) {
+template<typename dst_t>
+static __global__ void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const block_q6_K * x = (const block_q6_K *) vx;
const int i = blockIdx.x;
const int il = tid - 32*ip; // 0...32
const int is = 8*ip + il/16;
- float * y = yy + i*QK_K + 128*ip + il;
+ dst_t * y = yy + i*QK_K + 128*ip + il;
const float d = x[i].d;
const int ip = tid/16; // 0 or 1
const int il = tid - 16*ip; // 0...15
- float * y = yy + i*QK_K + 16*ip + il;
+ dst_t * y = yy + i*QK_K + 16*ip + il;
const float d = x[i].d;
v.y = x[ib + iqs + 1];
}
+static __device__ void convert_f32(const void * vx, const int ib, const int iqs, dfloat2 & v){
+ const float * x = (const float *) vx;
+
+    // automatic float -> half type cast if dfloat == half
+ v.x = x[ib + iqs + 0];
+ v.y = x[ib + iqs + 1];
+}
+
static __global__ void quantize_q8_1(const float * __restrict__ x, void * __restrict__ vy, const int kx, const int kx_padded) {
const int ix = blockDim.x*blockIdx.x + threadIdx.x;
reinterpret_cast<half&>(y[ib].ds.y) = sum;
}
-template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
-static __global__ void dequantize_block(const void * __restrict__ vx, float * __restrict__ y, const int k) {
+template<int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
+static __global__ void k_get_rows(const void * x, const int32_t * y, dst_t * dst, const int ncols) {
+ const int col = (blockIdx.x*blockDim.x + threadIdx.x)*2;
+ const int row = blockDim.y*blockIdx.y + threadIdx.y;
+
+ if (col >= ncols) {
+ return;
+ }
+
+ const int r = y[row];
+
+ // copy x[r*ncols + col] to dst[row*ncols + col]
+ const int xi = r*ncols + col;
+ const int di = row*ncols + col;
+
+ const int ib = xi/qk; // block index
+ const int iqs = (xi%qk)/qr; // quant index
+ const int iybs = di - di%qk; // y block start index
+ const int y_offset = qr == 1 ? 1 : qk/2;
+
+ // dequantize
+ dfloat2 v;
+ dequantize_kernel(x, ib, iqs, v);
+
+ dst[iybs + iqs + 0] = v.x;
+ dst[iybs + iqs + y_offset] = v.y;
+}
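
To make the index arithmetic in k_get_rows concrete, a worked example for the q4_0 layout (qk = 32 values per block, qr = 2), taking xi = di = 70:

    //   ib   = 70 / 32       = 2  -> third quantized block
    //   iqs  = (70 % 32) / 2 = 3  -> quant index inside the block
    //   iybs = 70 - 70 % 32  = 64 -> start of the destination block
    //   y_offset = qk/2 = 16, so the dequantized pair lands in dst[64 + 3] and dst[64 + 19]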
+
+template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
+static __global__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) {
const int i = blockDim.x*blockIdx.x + 2*threadIdx.x;
if (i >= k) {
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- __builtin_assume(i_offset >= 0);
- __builtin_assume(i_offset < nwarps);
- __builtin_assume(k >= 0);
- __builtin_assume(k < WARP_SIZE);
+ GGML_CUDA_ASSUME(i_offset >= 0);
+ GGML_CUDA_ASSUME(i_offset < nwarps);
+ GGML_CUDA_ASSUME(k >= 0);
+ GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI4_0;
const int kqsx = k % QI4_0;
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- __builtin_assume(i_offset >= 0);
- __builtin_assume(i_offset < nwarps);
- __builtin_assume(k >= 0);
- __builtin_assume(k < WARP_SIZE);
+ GGML_CUDA_ASSUME(i_offset >= 0);
+ GGML_CUDA_ASSUME(i_offset < nwarps);
+ GGML_CUDA_ASSUME(k >= 0);
+ GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI4_1;
const int kqsx = k % QI4_1;
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- __builtin_assume(i_offset >= 0);
- __builtin_assume(i_offset < nwarps);
- __builtin_assume(k >= 0);
- __builtin_assume(k < WARP_SIZE);
+ GGML_CUDA_ASSUME(i_offset >= 0);
+ GGML_CUDA_ASSUME(i_offset < nwarps);
+ GGML_CUDA_ASSUME(k >= 0);
+ GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI5_0;
const int kqsx = k % QI5_0;
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- __builtin_assume(i_offset >= 0);
- __builtin_assume(i_offset < nwarps);
- __builtin_assume(k >= 0);
- __builtin_assume(k < WARP_SIZE);
+ GGML_CUDA_ASSUME(i_offset >= 0);
+ GGML_CUDA_ASSUME(i_offset < nwarps);
+ GGML_CUDA_ASSUME(k >= 0);
+ GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI5_1;
const int kqsx = k % QI5_1;
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- __builtin_assume(i_offset >= 0);
- __builtin_assume(i_offset < nwarps);
- __builtin_assume(k >= 0);
- __builtin_assume(k < WARP_SIZE);
+ GGML_CUDA_ASSUME(i_offset >= 0);
+ GGML_CUDA_ASSUME(i_offset < nwarps);
+ GGML_CUDA_ASSUME(k >= 0);
+ GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI8_0;
const int kqsx = k % QI8_0;
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- __builtin_assume(i_offset >= 0);
- __builtin_assume(i_offset < nwarps);
- __builtin_assume(k >= 0);
- __builtin_assume(k < WARP_SIZE);
+ GGML_CUDA_ASSUME(i_offset >= 0);
+ GGML_CUDA_ASSUME(i_offset < nwarps);
+ GGML_CUDA_ASSUME(k >= 0);
+ GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI2_K;
const int kqsx = k % QI2_K;
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- __builtin_assume(i_offset >= 0);
- __builtin_assume(i_offset < nwarps);
- __builtin_assume(k >= 0);
- __builtin_assume(k < WARP_SIZE);
+ GGML_CUDA_ASSUME(i_offset >= 0);
+ GGML_CUDA_ASSUME(i_offset < nwarps);
+ GGML_CUDA_ASSUME(k >= 0);
+ GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI3_K;
const int kqsx = k % QI3_K;
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- __builtin_assume(i_offset >= 0);
- __builtin_assume(i_offset < nwarps);
- __builtin_assume(k >= 0);
- __builtin_assume(k < WARP_SIZE);
+ GGML_CUDA_ASSUME(i_offset >= 0);
+ GGML_CUDA_ASSUME(i_offset < nwarps);
+ GGML_CUDA_ASSUME(k >= 0);
+ GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI4_K; // == 0 if QK_K == 256
const int kqsx = k % QI4_K; // == k if QK_K == 256
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- __builtin_assume(i_offset >= 0);
- __builtin_assume(i_offset < nwarps);
- __builtin_assume(k >= 0);
- __builtin_assume(k < WARP_SIZE);
+ GGML_CUDA_ASSUME(i_offset >= 0);
+ GGML_CUDA_ASSUME(i_offset < nwarps);
+ GGML_CUDA_ASSUME(k >= 0);
+ GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI5_K; // == 0 if QK_K == 256
const int kqsx = k % QI5_K; // == k if QK_K == 256
const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) {
- __builtin_assume(i_offset >= 0);
- __builtin_assume(i_offset < nwarps);
- __builtin_assume(k >= 0);
- __builtin_assume(k < WARP_SIZE);
+ GGML_CUDA_ASSUME(i_offset >= 0);
+ GGML_CUDA_ASSUME(i_offset < nwarps);
+ GGML_CUDA_ASSUME(k >= 0);
+ GGML_CUDA_ASSUME(k < WARP_SIZE);
const int kbx = k / QI6_K; // == 0 if QK_K == 256
const int kqsx = k % QI6_K; // == k if QK_K == 256
}
}
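+// GGML_CUDA_ASSUME (replacing __builtin_assume above) is presumably a thin wrapper
+// defined elsewhere in the patch, along the lines of (hypothetical guard shown):
+//   #if CUDART_VERSION >= 11020
+//   #define GGML_CUDA_ASSUME(x) __builtin_assume(x)
+//   #else
+//   #define GGML_CUDA_ASSUME(x)
+//   #endif
+// so the optimizer hint stays portable across nvcc and HIP.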
+#define MMQ_X_Q4_0_RDNA2 64
+#define MMQ_Y_Q4_0_RDNA2 128
+#define NWARPS_Q4_0_RDNA2 8
+#define MMQ_X_Q4_0_RDNA1 64
+#define MMQ_Y_Q4_0_RDNA1 64
+#define NWARPS_Q4_0_RDNA1 8
+#if defined(CUDA_USE_TENSOR_CORES)
+#define MMQ_X_Q4_0_AMPERE 4
+#define MMQ_Y_Q4_0_AMPERE 32
+#define NWARPS_Q4_0_AMPERE 4
+#else
#define MMQ_X_Q4_0_AMPERE 64
#define MMQ_Y_Q4_0_AMPERE 128
#define NWARPS_Q4_0_AMPERE 4
+#endif
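+// When tensor cores are in use, cuBLAS handles most matrix multiplications and these
+// MMQ kernels serve only as a fallback, so the tiles above are kept small
+// (presumably to reduce register pressure and compile time).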
#define MMQ_X_Q4_0_PASCAL 64
#define MMQ_Y_Q4_0_PASCAL 64
#define NWARPS_Q4_0_PASCAL 8
-template <bool need_check> static __global__ void mul_mat_q4_0(
+template <bool need_check> static __global__ void
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ __launch_bounds__(WARP_SIZE*NWARPS_Q4_0_RDNA2, 2)
+#endif // defined(RDNA3) || defined(RDNA2)
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+ mul_mat_q4_0(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
-#if __CUDA_ARCH__ >= CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ const int mmq_x = MMQ_X_Q4_0_RDNA2;
+ const int mmq_y = MMQ_Y_Q4_0_RDNA2;
+ const int nwarps = NWARPS_Q4_0_RDNA2;
+#else
+ const int mmq_x = MMQ_X_Q4_0_RDNA1;
+ const int mmq_y = MMQ_Y_Q4_0_RDNA1;
+ const int nwarps = NWARPS_Q4_0_RDNA1;
+#endif // defined(RDNA3) || defined(RDNA2)
+
+ mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>,
+ load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= CC_VOLTA
const int mmq_x = MMQ_X_Q4_0_AMPERE;
const int mmq_y = MMQ_Y_Q4_0_AMPERE;
const int nwarps = NWARPS_Q4_0_AMPERE;
#else
(void) vec_dot_q4_0_q8_1_mul_mat;
assert(false);
-#endif // __CUDA_ARCH__ >= CC_TURING
-}
-
+#endif // __CUDA_ARCH__ >= CC_VOLTA
+}
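+// Dispatch pattern shared by all mul_mat_q* kernels in this patch: on HIP/AMD the
+// RDNA tile sizes are selected at compile time; on NVIDIA the AMPERE constants now
+// apply from CC_VOLTA upward (previously CC_TURING), with older architectures
+// handled by the unchanged pre-Volta branches.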
+
+#define MMQ_X_Q4_1_RDNA2 64
+#define MMQ_Y_Q4_1_RDNA2 128
+#define NWARPS_Q4_1_RDNA2 8
+#define MMQ_X_Q4_1_RDNA1 64
+#define MMQ_Y_Q4_1_RDNA1 64
+#define NWARPS_Q4_1_RDNA1 8
+#if defined(CUDA_USE_TENSOR_CORES)
+#define MMQ_X_Q4_1_AMPERE 4
+#define MMQ_Y_Q4_1_AMPERE 32
+#define NWARPS_Q4_1_AMPERE 4
+#else
#define MMQ_X_Q4_1_AMPERE 64
#define MMQ_Y_Q4_1_AMPERE 128
#define NWARPS_Q4_1_AMPERE 4
+#endif
#define MMQ_X_Q4_1_PASCAL 64
#define MMQ_Y_Q4_1_PASCAL 64
#define NWARPS_Q4_1_PASCAL 8
template <bool need_check> static __global__ void
-#if __CUDA_ARCH__ < CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ __launch_bounds__(WARP_SIZE*NWARPS_Q4_1_RDNA2, 2)
+#endif // defined(RDNA3) || defined(RDNA2)
+#elif __CUDA_ARCH__ < CC_VOLTA
__launch_bounds__(WARP_SIZE*NWARPS_Q4_1_PASCAL, 2)
-#endif // __CUDA_ARCH__ < CC_TURING
+#endif // __CUDA_ARCH__ < CC_VOLTA
mul_mat_q4_1(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
-#if __CUDA_ARCH__ >= CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ const int mmq_x = MMQ_X_Q4_1_RDNA2;
+ const int mmq_y = MMQ_Y_Q4_1_RDNA2;
+ const int nwarps = NWARPS_Q4_1_RDNA2;
+#else
+ const int mmq_x = MMQ_X_Q4_1_RDNA1;
+ const int mmq_y = MMQ_Y_Q4_1_RDNA1;
+ const int nwarps = NWARPS_Q4_1_RDNA1;
+#endif // defined(RDNA3) || defined(RDNA2)
+
+ mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps, allocate_tiles_q4_1<mmq_y>,
+ load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ, vec_dot_q4_1_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= CC_VOLTA
const int mmq_x = MMQ_X_Q4_1_AMPERE;
const int mmq_y = MMQ_Y_Q4_1_AMPERE;
const int nwarps = NWARPS_Q4_1_AMPERE;
#else
(void) vec_dot_q4_1_q8_1_mul_mat;
assert(false);
-#endif // __CUDA_ARCH__ >= CC_TURING
-}
-
+#endif // __CUDA_ARCH__ >= CC_VOLTA
+}
+
+#define MMQ_X_Q5_0_RDNA2 64
+#define MMQ_Y_Q5_0_RDNA2 128
+#define NWARPS_Q5_0_RDNA2 8
+#define MMQ_X_Q5_0_RDNA1 64
+#define MMQ_Y_Q5_0_RDNA1 64
+#define NWARPS_Q5_0_RDNA1 8
+#if defined(CUDA_USE_TENSOR_CORES)
+#define MMQ_X_Q5_0_AMPERE 4
+#define MMQ_Y_Q5_0_AMPERE 32
+#define NWARPS_Q5_0_AMPERE 4
+#else
#define MMQ_X_Q5_0_AMPERE 128
#define MMQ_Y_Q5_0_AMPERE 64
#define NWARPS_Q5_0_AMPERE 4
+#endif
#define MMQ_X_Q5_0_PASCAL 64
#define MMQ_Y_Q5_0_PASCAL 64
#define NWARPS_Q5_0_PASCAL 8
-template <bool need_check> static __global__ void mul_mat_q5_0(
+template <bool need_check> static __global__ void
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ __launch_bounds__(WARP_SIZE*NWARPS_Q5_0_RDNA2, 2)
+#endif // defined(RDNA3) || defined(RDNA2)
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+ mul_mat_q5_0(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
-#if __CUDA_ARCH__ >= CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ const int mmq_x = MMQ_X_Q5_0_RDNA2;
+ const int mmq_y = MMQ_Y_Q5_0_RDNA2;
+ const int nwarps = NWARPS_Q5_0_RDNA2;
+#else
+ const int mmq_x = MMQ_X_Q5_0_RDNA1;
+ const int mmq_y = MMQ_Y_Q5_0_RDNA1;
+ const int nwarps = NWARPS_Q5_0_RDNA1;
+#endif // defined(RDNA3) || defined(RDNA2)
+
+ mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps, allocate_tiles_q5_0<mmq_y>,
+ load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ, vec_dot_q5_0_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= CC_VOLTA
const int mmq_x = MMQ_X_Q5_0_AMPERE;
const int mmq_y = MMQ_Y_Q5_0_AMPERE;
const int nwarps = NWARPS_Q5_0_AMPERE;
#else
(void) vec_dot_q5_0_q8_1_mul_mat;
assert(false);
-#endif // __CUDA_ARCH__ >= CC_TURING
-}
-
+#endif // __CUDA_ARCH__ >= CC_VOLTA
+}
+
+#define MMQ_X_Q5_1_RDNA2 64
+#define MMQ_Y_Q5_1_RDNA2 128
+#define NWARPS_Q5_1_RDNA2 8
+#define MMQ_X_Q5_1_RDNA1 64
+#define MMQ_Y_Q5_1_RDNA1 64
+#define NWARPS_Q5_1_RDNA1 8
+#if defined(CUDA_USE_TENSOR_CORES)
+#define MMQ_X_Q5_1_AMPERE 4
+#define MMQ_Y_Q5_1_AMPERE 32
+#define NWARPS_Q5_1_AMPERE 4
+#else
#define MMQ_X_Q5_1_AMPERE 128
#define MMQ_Y_Q5_1_AMPERE 64
#define NWARPS_Q5_1_AMPERE 4
+#endif
#define MMQ_X_Q5_1_PASCAL 64
#define MMQ_Y_Q5_1_PASCAL 64
#define NWARPS_Q5_1_PASCAL 8
-template <bool need_check> static __global__ void mul_mat_q5_1(
+template <bool need_check> static __global__ void
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ __launch_bounds__(WARP_SIZE*NWARPS_Q5_1_RDNA2, 2)
+#endif // defined(RDNA3) || defined(RDNA2)
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+mul_mat_q5_1(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
-#if __CUDA_ARCH__ >= CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ const int mmq_x = MMQ_X_Q5_1_RDNA2;
+ const int mmq_y = MMQ_Y_Q5_1_RDNA2;
+ const int nwarps = NWARPS_Q5_1_RDNA2;
+#else
+ const int mmq_x = MMQ_X_Q5_1_RDNA1;
+ const int mmq_y = MMQ_Y_Q5_1_RDNA1;
+ const int nwarps = NWARPS_Q5_1_RDNA1;
+#endif // defined(RDNA3) || defined(RDNA2)
+
+ mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps, allocate_tiles_q5_1<mmq_y>,
+ load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ, vec_dot_q5_1_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= CC_VOLTA
const int mmq_x = MMQ_X_Q5_1_AMPERE;
const int mmq_y = MMQ_Y_Q5_1_AMPERE;
const int nwarps = NWARPS_Q5_1_AMPERE;
#else
(void) vec_dot_q5_1_q8_1_mul_mat;
assert(false);
-#endif // __CUDA_ARCH__ >= CC_TURING
-}
-
+#endif // __CUDA_ARCH__ >= CC_VOLTA
+}
+
+#define MMQ_X_Q8_0_RDNA2 64
+#define MMQ_Y_Q8_0_RDNA2 128
+#define NWARPS_Q8_0_RDNA2 8
+#define MMQ_X_Q8_0_RDNA1 64
+#define MMQ_Y_Q8_0_RDNA1 64
+#define NWARPS_Q8_0_RDNA1 8
+#if defined(CUDA_USE_TENSOR_CORES)
+#define MMQ_X_Q8_0_AMPERE 4
+#define MMQ_Y_Q8_0_AMPERE 32
+#define NWARPS_Q8_0_AMPERE 4
+#else
#define MMQ_X_Q8_0_AMPERE 128
#define MMQ_Y_Q8_0_AMPERE 64
#define NWARPS_Q8_0_AMPERE 4
+#endif
#define MMQ_X_Q8_0_PASCAL 64
#define MMQ_Y_Q8_0_PASCAL 64
#define NWARPS_Q8_0_PASCAL 8
-template <bool need_check> static __global__ void mul_mat_q8_0(
+template <bool need_check> static __global__ void
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ __launch_bounds__(WARP_SIZE*NWARPS_Q8_0_RDNA2, 2)
+#endif // defined(RDNA3) || defined(RDNA2)
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+ mul_mat_q8_0(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
-#if __CUDA_ARCH__ >= CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ const int mmq_x = MMQ_X_Q8_0_RDNA2;
+ const int mmq_y = MMQ_Y_Q8_0_RDNA2;
+ const int nwarps = NWARPS_Q8_0_RDNA2;
+#else
+ const int mmq_x = MMQ_X_Q8_0_RDNA1;
+ const int mmq_y = MMQ_Y_Q8_0_RDNA1;
+ const int nwarps = NWARPS_Q8_0_RDNA1;
+#endif // defined(RDNA3) || defined(RDNA2)
+
+ mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps, allocate_tiles_q8_0<mmq_y>,
+ load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ, vec_dot_q8_0_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= CC_VOLTA
const int mmq_x = MMQ_X_Q8_0_AMPERE;
const int mmq_y = MMQ_Y_Q8_0_AMPERE;
const int nwarps = NWARPS_Q8_0_AMPERE;
#else
(void) vec_dot_q8_0_q8_1_mul_mat;
assert(false);
-#endif // __CUDA_ARCH__ >= CC_TURING
-}
-
+#endif // __CUDA_ARCH__ >= CC_VOLTA
+}
+
+#define MMQ_X_Q2_K_RDNA2 64
+#define MMQ_Y_Q2_K_RDNA2 128
+#define NWARPS_Q2_K_RDNA2 8
+#define MMQ_X_Q2_K_RDNA1 128
+#define MMQ_Y_Q2_K_RDNA1 32
+#define NWARPS_Q2_K_RDNA1 8
+#if defined(CUDA_USE_TENSOR_CORES)
+#define MMQ_X_Q2_K_AMPERE 4
+#define MMQ_Y_Q2_K_AMPERE 32
+#define NWARPS_Q2_K_AMPERE 4
+#else
#define MMQ_X_Q2_K_AMPERE 64
#define MMQ_Y_Q2_K_AMPERE 128
#define NWARPS_Q2_K_AMPERE 4
+#endif
#define MMQ_X_Q2_K_PASCAL 64
#define MMQ_Y_Q2_K_PASCAL 64
#define NWARPS_Q2_K_PASCAL 8
-template <bool need_check> static __global__ void mul_mat_q2_K(
+template <bool need_check> static __global__ void
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ __launch_bounds__(WARP_SIZE*NWARPS_Q2_K_RDNA2, 2)
+#endif // defined(RDNA3) || defined(RDNA2)
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+mul_mat_q2_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
-#if __CUDA_ARCH__ >= CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ const int mmq_x = MMQ_X_Q2_K_RDNA2;
+ const int mmq_y = MMQ_Y_Q2_K_RDNA2;
+ const int nwarps = NWARPS_Q2_K_RDNA2;
+#else
+ const int mmq_x = MMQ_X_Q2_K_RDNA1;
+ const int mmq_y = MMQ_Y_Q2_K_RDNA1;
+ const int nwarps = NWARPS_Q2_K_RDNA1;
+#endif // defined(RDNA3) || defined(RDNA2)
+
+ mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps, allocate_tiles_q2_K<mmq_y>,
+ load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ, vec_dot_q2_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= CC_VOLTA
const int mmq_x = MMQ_X_Q2_K_AMPERE;
const int mmq_y = MMQ_Y_Q2_K_AMPERE;
const int nwarps = NWARPS_Q2_K_AMPERE;
#else
(void) vec_dot_q2_K_q8_1_mul_mat;
assert(false);
-#endif // __CUDA_ARCH__ >= CC_TURING
-}
-
+#endif // __CUDA_ARCH__ >= CC_VOLTA
+}
+
+#define MMQ_X_Q3_K_RDNA2 128
+#define MMQ_Y_Q3_K_RDNA2 64
+#define NWARPS_Q3_K_RDNA2 8
+#define MMQ_X_Q3_K_RDNA1 32
+#define MMQ_Y_Q3_K_RDNA1 128
+#define NWARPS_Q3_K_RDNA1 8
+#if defined(CUDA_USE_TENSOR_CORES)
+#define MMQ_X_Q3_K_AMPERE 4
+#define MMQ_Y_Q3_K_AMPERE 32
+#define NWARPS_Q3_K_AMPERE 4
+#else
#define MMQ_X_Q3_K_AMPERE 128
#define MMQ_Y_Q3_K_AMPERE 128
#define NWARPS_Q3_K_AMPERE 4
+#endif
#define MMQ_X_Q3_K_PASCAL 64
#define MMQ_Y_Q3_K_PASCAL 64
#define NWARPS_Q3_K_PASCAL 8
template <bool need_check> static __global__ void
-#if __CUDA_ARCH__ < CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ __launch_bounds__(WARP_SIZE*NWARPS_Q3_K_RDNA2, 2)
+#endif // defined(RDNA3) || defined(RDNA2)
+#elif __CUDA_ARCH__ < CC_VOLTA
__launch_bounds__(WARP_SIZE*NWARPS_Q3_K_PASCAL, 2)
-#endif // __CUDA_ARCH__ < CC_TURING
+#endif // __CUDA_ARCH__ < CC_VOLTA
mul_mat_q3_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
-#if __CUDA_ARCH__ >= CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ const int mmq_x = MMQ_X_Q3_K_RDNA2;
+ const int mmq_y = MMQ_Y_Q3_K_RDNA2;
+ const int nwarps = NWARPS_Q3_K_RDNA2;
+#else
+ const int mmq_x = MMQ_X_Q3_K_RDNA1;
+ const int mmq_y = MMQ_Y_Q3_K_RDNA1;
+ const int nwarps = NWARPS_Q3_K_RDNA1;
+#endif // defined(RDNA3) || defined(RDNA2)
+
+ mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps, allocate_tiles_q3_K<mmq_y>,
+ load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ, vec_dot_q3_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= CC_VOLTA
const int mmq_x = MMQ_X_Q3_K_AMPERE;
const int mmq_y = MMQ_Y_Q3_K_AMPERE;
const int nwarps = NWARPS_Q3_K_AMPERE;
#else
(void) vec_dot_q3_K_q8_1_mul_mat;
assert(false);
-#endif // __CUDA_ARCH__ >= CC_TURING
-}
-
+#endif // __CUDA_ARCH__ >= CC_VOLTA
+}
+
+#define MMQ_X_Q4_K_RDNA2 64
+#define MMQ_Y_Q4_K_RDNA2 128
+#define NWARPS_Q4_K_RDNA2 8
+#define MMQ_X_Q4_K_RDNA1 32
+#define MMQ_Y_Q4_K_RDNA1 64
+#define NWARPS_Q4_K_RDNA1 8
+#if defined(CUDA_USE_TENSOR_CORES)
+#define MMQ_X_Q4_K_AMPERE 4
+#define MMQ_Y_Q4_K_AMPERE 32
+#define NWARPS_Q4_K_AMPERE 4
+#else
#define MMQ_X_Q4_K_AMPERE 64
#define MMQ_Y_Q4_K_AMPERE 128
#define NWARPS_Q4_K_AMPERE 4
+#endif
#define MMQ_X_Q4_K_PASCAL 64
#define MMQ_Y_Q4_K_PASCAL 64
#define NWARPS_Q4_K_PASCAL 8
template <bool need_check> static __global__ void
-#if __CUDA_ARCH__ < CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ __launch_bounds__(WARP_SIZE*NWARPS_Q4_K_RDNA2, 2)
+#endif // defined(RDNA3) || defined(RDNA2)
+#elif __CUDA_ARCH__ < CC_VOLTA
__launch_bounds__(WARP_SIZE*NWARPS_Q4_K_PASCAL, 2)
-#endif // __CUDA_ARCH__ < CC_TURING
+#endif // __CUDA_ARCH__ < CC_VOLTA
mul_mat_q4_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
-#if __CUDA_ARCH__ >= CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ const int mmq_x = MMQ_X_Q4_K_RDNA2;
+ const int mmq_y = MMQ_Y_Q4_K_RDNA2;
+ const int nwarps = NWARPS_Q4_K_RDNA2;
+#else
+ const int mmq_x = MMQ_X_Q4_K_RDNA1;
+ const int mmq_y = MMQ_Y_Q4_K_RDNA1;
+ const int nwarps = NWARPS_Q4_K_RDNA1;
+#endif // defined(RDNA3) || defined(RDNA2)
+
+ mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps, allocate_tiles_q4_K<mmq_y>,
+ load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ, vec_dot_q4_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= CC_VOLTA
const int mmq_x = MMQ_X_Q4_K_AMPERE;
const int mmq_y = MMQ_Y_Q4_K_AMPERE;
const int nwarps = NWARPS_Q4_K_AMPERE;
#else
(void) vec_dot_q4_K_q8_1_mul_mat;
assert(false);
-#endif // __CUDA_ARCH__ >= CC_TURING
-}
-
+#endif // __CUDA_ARCH__ >= CC_VOLTA
+}
+
+#define MMQ_X_Q5_K_RDNA2 64
+#define MMQ_Y_Q5_K_RDNA2 128
+#define NWARPS_Q5_K_RDNA2 8
+#define MMQ_X_Q5_K_RDNA1 32
+#define MMQ_Y_Q5_K_RDNA1 64
+#define NWARPS_Q5_K_RDNA1 8
+#if defined(CUDA_USE_TENSOR_CORES)
+#define MMQ_X_Q5_K_AMPERE 4
+#define MMQ_Y_Q5_K_AMPERE 32
+#define NWARPS_Q5_K_AMPERE 4
+#else
#define MMQ_X_Q5_K_AMPERE 64
#define MMQ_Y_Q5_K_AMPERE 128
#define NWARPS_Q5_K_AMPERE 4
+#endif
#define MMQ_X_Q5_K_PASCAL 64
#define MMQ_Y_Q5_K_PASCAL 64
#define NWARPS_Q5_K_PASCAL 8
-template <bool need_check> static __global__ void mul_mat_q5_K(
+template <bool need_check> static __global__ void
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ __launch_bounds__(WARP_SIZE*NWARPS_Q5_K_RDNA2, 2)
+#endif // defined(RDNA3) || defined(RDNA2)
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+mul_mat_q5_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
-#if __CUDA_ARCH__ >= CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ const int mmq_x = MMQ_X_Q5_K_RDNA2;
+ const int mmq_y = MMQ_Y_Q5_K_RDNA2;
+ const int nwarps = NWARPS_Q5_K_RDNA2;
+#else
+ const int mmq_x = MMQ_X_Q5_K_RDNA1;
+ const int mmq_y = MMQ_Y_Q5_K_RDNA1;
+ const int nwarps = NWARPS_Q5_K_RDNA1;
+#endif // defined(RDNA3) || defined(RDNA2)
+
+ mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps, allocate_tiles_q5_K<mmq_y>,
+ load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ, vec_dot_q5_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= CC_VOLTA
const int mmq_x = MMQ_X_Q5_K_AMPERE;
const int mmq_y = MMQ_Y_Q5_K_AMPERE;
const int nwarps = NWARPS_Q5_K_AMPERE;
#else
(void) vec_dot_q5_K_q8_1_mul_mat;
assert(false);
-#endif // __CUDA_ARCH__ >= CC_TURING
-}
-
+#endif // __CUDA_ARCH__ >= CC_VOLTA
+}
+
+#define MMQ_X_Q6_K_RDNA2 64
+#define MMQ_Y_Q6_K_RDNA2 128
+#define NWARPS_Q6_K_RDNA2 8
+#define MMQ_X_Q6_K_RDNA1 32
+#define MMQ_Y_Q6_K_RDNA1 64
+#define NWARPS_Q6_K_RDNA1 8
+#if defined(CUDA_USE_TENSOR_CORES)
+#define MMQ_X_Q6_K_AMPERE 4
+#define MMQ_Y_Q6_K_AMPERE 32
+#define NWARPS_Q6_K_AMPERE 4
+#else
#define MMQ_X_Q6_K_AMPERE 64
#define MMQ_Y_Q6_K_AMPERE 64
#define NWARPS_Q6_K_AMPERE 4
+#endif
#define MMQ_X_Q6_K_PASCAL 64
#define MMQ_Y_Q6_K_PASCAL 64
#define NWARPS_Q6_K_PASCAL 8
template <bool need_check> static __global__ void
-#if __CUDA_ARCH__ < CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ __launch_bounds__(WARP_SIZE*NWARPS_Q6_K_RDNA2, 2)
+#endif // defined(RDNA3) || defined(RDNA2)
+#elif __CUDA_ARCH__ < CC_VOLTA
__launch_bounds__(WARP_SIZE*NWARPS_Q6_K_PASCAL, 2)
-#endif // __CUDA_ARCH__ < CC_TURING
+#endif // __CUDA_ARCH__ < CC_VOLTA
mul_mat_q6_K(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) {
-#if __CUDA_ARCH__ >= CC_TURING
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA3) || defined(RDNA2)
+ const int mmq_x = MMQ_X_Q6_K_RDNA2;
+ const int mmq_y = MMQ_Y_Q6_K_RDNA2;
+ const int nwarps = NWARPS_Q6_K_RDNA2;
+#else
+ const int mmq_x = MMQ_X_Q6_K_RDNA1;
+ const int mmq_y = MMQ_Y_Q6_K_RDNA1;
+ const int nwarps = NWARPS_Q6_K_RDNA1;
+#endif // defined(RDNA3) || defined(RDNA2)
+
+ mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps, allocate_tiles_q6_K<mmq_y>,
+ load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ, vec_dot_q6_K_q8_1_mul_mat>
+ (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst);
+
+#elif __CUDA_ARCH__ >= CC_VOLTA
const int mmq_x = MMQ_X_Q6_K_AMPERE;
const int mmq_y = MMQ_Y_Q6_K_AMPERE;
const int nwarps = NWARPS_Q6_K_AMPERE;
#else
(void) vec_dot_q6_K_q8_1_mul_mat;
assert(false);
-#endif // __CUDA_ARCH__ >= CC_TURING
+#endif // __CUDA_ARCH__ >= CC_VOLTA
}
template <int qk, int qi, typename block_q_t, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda>
const half * x = (const half *) vx;
- const int row_x = blockDim.y*blockIdx.y + threadIdx.y;
- const int channel = blockDim.z*blockIdx.z + threadIdx.z;
+ const int row_x = blockDim.y*blockIdx.y + threadIdx.y;
+ const int channel = blockDim.z*blockIdx.z + threadIdx.z;
const int channel_x = channel / channel_x_divisor;
- const int nrows_y = ncols_x;
+ const int nrows_y = ncols_x;
const int nrows_dst = nrows_x;
- const int row_dst = row_x;
+ const int row_dst = row_x;
const int idst = channel*nrows_dst + row_dst;
break;
}
- const int ix = channel_x*channel_stride_x + row_x*row_stride_x + col_x;
- const float xi = __half2float(x[ix]);
-
const int row_y = col_x;
+ const int ix = channel_x*channel_stride_x + row_x*row_stride_x + col_x;
const int iy = channel*nrows_y + row_y;
+ const float xi = __half2float(x[ix]);
+
tmp += xi * y[iy];
}
cpy_1(cx + x_offset, cdst + dst_offset);
}
+static __device__ float rope_yarn_ramp(const float low, const float high, const int i0) {
+ const float y = (i0 / 2 - low) / max(0.001f, high - low);
+ return 1.0f - min(1.0f, max(0.0f, y));
+}
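+// The ramp is 1 for dimension indices below corr_dims.v[0], 0 above corr_dims.v[1],
+// and decreases linearly in between; max(0.001f, ...) guards against a zero-width ramp.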
+
+struct rope_corr_dims {
+ float v[4];
+};
+
+// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
+// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
+static __device__ void rope_yarn(
+ float theta_extrap, float freq_scale, rope_corr_dims corr_dims, int64_t i0, float ext_factor, float mscale,
+ float * cos_theta, float * sin_theta
+) {
+ // Get n-d rotational scaling corrected for extrapolation
+ float theta_interp = freq_scale * theta_extrap;
+ float theta = theta_interp;
+ if (ext_factor != 0.0f) {
+ float ramp_mix = rope_yarn_ramp(corr_dims.v[0], corr_dims.v[1], i0) * ext_factor;
+ theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
+
+ // Get n-d magnitude scaling corrected for interpolation
+ mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale);
+ }
+ *cos_theta = cosf(theta) * mscale;
+ *sin_theta = sinf(theta) * mscale;
+}
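+// With ext_factor == 0 this reduces to plain position interpolation
+// (theta = freq_scale * theta_extrap); otherwise the ramp blends in the extrapolated
+// angle per dimension and the magnitude is corrected by 1 + 0.1f*logf(1/freq_scale).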
+
// rope == RoPE == rotary positional embedding
-static __global__ void rope_f32(const float * x, float * dst, const int ncols, const float p0,
- const float p_delta, const int p_delta_rows, const float theta_scale) {
+template<typename T, bool has_pos>
+static __global__ void rope(
+ const T * x, T * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base,
+ float ext_factor, float attn_factor, rope_corr_dims corr_dims
+) {
const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y);
if (col >= ncols) {
const int row = blockDim.x*blockIdx.x + threadIdx.x;
const int i = row*ncols + col;
+ const int i2 = row/p_delta_rows;
- const float theta = (p0 + p_delta * (row/p_delta_rows))*powf(theta_scale, col/2);
- const float sin_theta = sinf(theta);
- const float cos_theta = cosf(theta);
+ const int p = has_pos ? pos[i2] : 0;
+ const float theta_base = p*powf(freq_base, -float(col)/ncols);
+
+ float cos_theta, sin_theta;
+ rope_yarn(theta_base, freq_scale, corr_dims, col, ext_factor, attn_factor, &cos_theta, &sin_theta);
const float x0 = x[i + 0];
const float x1 = x[i + 1];
dst[i + 1] = x0*sin_theta + x1*cos_theta;
}
-static __global__ void rope_neox_f32(const float * x, float * dst, const int ncols, const float p0,
- const float p_delta, const int p_delta_rows, const float theta_scale) {
+template<typename T, bool has_pos>
+static __global__ void rope_neox(
+ const T * x, T * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base,
+ float ext_factor, float attn_factor, rope_corr_dims corr_dims
+) {
const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y);
if (col >= ncols) {
const int row = blockDim.x*blockIdx.x + threadIdx.x;
const int i = row*ncols + col/2;
+ const int i2 = row/p_delta_rows;
- const float theta = (p0 + p_delta * (row/p_delta_rows))*powf(theta_scale, col/2);
- const float sin_theta = sinf(theta);
- const float cos_theta = cosf(theta);
+ // simplified from `(ib * ncols + col) * (-1 / ncols)`, where ib is assumed to be zero
+ const float cur_rot = -float(col)/ncols;
+
+ const int p = has_pos ? pos[i2] : 0;
+ const float theta_base = p*powf(freq_base, cur_rot);
+
+ float cos_theta, sin_theta;
+ rope_yarn(theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta);
const float x0 = x[i + 0];
const float x1 = x[i + ncols/2];
dst[i + ncols/2] = x0*sin_theta + x1*cos_theta;
}
-static __global__ void rope_glm_f32(const float * x, float * dst, const int ncols, const float p0,
- const float p_delta, const int p_delta_rows, const float theta_scale, const int n_ctx) {
+static __global__ void rope_glm_f32(
+ const float * x, float * dst, int ncols, const int32_t * pos, float freq_scale, int p_delta_rows, float freq_base,
+ int n_ctx
+) {
const int col = blockDim.x*blockIdx.x + threadIdx.x;
const int half_n_dims = ncols/4;
const int row = blockDim.y*blockIdx.y + threadIdx.y;
const int i = row*ncols + col;
+ const int i2 = row/p_delta_rows;
- const float col_theta_scale = powf(theta_scale, col);
- const float p = p0 + p_delta*(row/p_delta_rows);
+ const float col_theta_scale = powf(freq_base, -2.0f*col/ncols);
+ // FIXME: this is likely wrong
+ const int p = pos != nullptr ? pos[i2] : 0;
- const float theta = min(p, p_delta*(n_ctx - 2))*col_theta_scale;
+ const float theta = min(p, n_ctx - 2)*freq_scale*col_theta_scale;
const float sin_theta = sinf(theta);
const float cos_theta = cosf(theta);
dst[i + 0] = x0*cos_theta - x1*sin_theta;
dst[i + half_n_dims] = x0*sin_theta + x1*cos_theta;
- const float block_theta = max(p - p_delta*(n_ctx - 2), 0.f)*col_theta_scale;
+ const float block_theta = ((float)max(p - (n_ctx - 2), 0))*col_theta_scale;
const float sin_block_theta = sinf(block_theta);
const float cos_block_theta = cosf(block_theta);
dst[i] = scale * x[i];
}
+static __global__ void clamp_f32(const float * x, float * dst, const float min, const float max, const int k) {
+ const int i = blockDim.x*blockIdx.x + threadIdx.x;
+
+ if (i >= k) {
+ return;
+ }
+
+ dst[i] = x[i] < min ? min : (x[i] > max ? max : x[i]);
+}
+
+template<int qk, int qr, dequantize_kernel_t dq>
+static void get_rows_cuda(const void * x, const int32_t * y, float * dst, const int nrows, const int ncols, cudaStream_t stream) {
+ const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1);
+ const int block_num_x = (ncols + 2*CUDA_GET_ROWS_BLOCK_SIZE - 1) / (2*CUDA_GET_ROWS_BLOCK_SIZE);
+ const dim3 block_nums(block_num_x, nrows, 1);
+ k_get_rows<qk, qr, dq><<<block_nums, block_dims, 0, stream>>>(x, y, dst, ncols);
+}
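+// Each k_get_rows thread writes two elements, hence the 2* factor in the grid size;
+// one block row (blockIdx.y) is launched per row index in y.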
+
static void add_f32_cuda(const float * x, const float * y, float * dst, const int kx, const int ky, cudaStream_t stream) {
const int num_blocks = (kx + CUDA_ADD_BLOCK_SIZE - 1) / CUDA_ADD_BLOCK_SIZE;
add_f32<<<num_blocks, CUDA_ADD_BLOCK_SIZE, 0, stream>>>(x, y, dst, kx, ky);
add_f16_f32_f16<<<num_blocks, CUDA_ADD_BLOCK_SIZE, 0, stream>>>(x, y, dst, k);
}
+static void add_f16_f32_f32_cuda(const half * x, const float * y, float * dst, const int k, cudaStream_t stream) {
+ const int num_blocks = (k + CUDA_ADD_BLOCK_SIZE - 1) / CUDA_ADD_BLOCK_SIZE;
+ add_f16_f32_f32<<<num_blocks, CUDA_ADD_BLOCK_SIZE, 0, stream>>>(x, y, dst, k);
+}
+
static void mul_f32_cuda(const float * x, const float * y, float * dst, const int kx, const int ky, cudaStream_t stream) {
const int num_blocks = (kx + CUDA_MUL_BLOCK_SIZE - 1) / CUDA_MUL_BLOCK_SIZE;
mul_f32<<<num_blocks, CUDA_MUL_BLOCK_SIZE, 0, stream>>>(x, y, dst, kx, ky);
quantize_q8_1<<<num_blocks, block_size, 0, stream>>>(x, vy, kx, kx_padded);
}
-static void dequantize_row_q4_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
+template<typename dst_t>
+static void dequantize_row_q4_0_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
dequantize_block<QK4_0, QR4_0, dequantize_q4_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
-static void dequantize_row_q4_1_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
+template<typename dst_t>
+static void dequantize_row_q4_1_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
dequantize_block<QK4_1, QR4_1, dequantize_q4_1><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
-static void dequantize_row_q5_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
+template<typename dst_t>
+static void dequantize_row_q5_0_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
dequantize_block<QK5_0, QR5_0, dequantize_q5_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
-static void dequantize_row_q5_1_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
+template<typename dst_t>
+static void dequantize_row_q5_1_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
dequantize_block<QK5_1, QR5_1, dequantize_q5_1><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
-static void dequantize_row_q8_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
+template<typename dst_t>
+static void dequantize_row_q8_0_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
dequantize_block<QK8_0, QR8_0, dequantize_q8_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
-static void dequantize_row_q2_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
+template<typename dst_t>
+static void dequantize_row_q2_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
const int nb = k / QK_K;
#if QK_K == 256
dequantize_block_q2_K<<<nb, 64, 0, stream>>>(vx, y);
#endif
}
-static void dequantize_row_q3_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
+template<typename dst_t>
+static void dequantize_row_q3_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
const int nb = k / QK_K;
#if QK_K == 256
dequantize_block_q3_K<<<nb, 64, 0, stream>>>(vx, y);
#endif
}
-static void dequantize_row_q4_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
+template<typename dst_t>
+static void dequantize_row_q4_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
const int nb = k / QK_K;
dequantize_block_q4_K<<<nb, 32, 0, stream>>>(vx, y);
}
-static void dequantize_row_q5_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
+template<typename dst_t>
+static void dequantize_row_q5_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
const int nb = k / QK_K;
#if QK_K == 256
dequantize_block_q5_K<<<nb, 64, 0, stream>>>(vx, y);
#endif
}
-static void dequantize_row_q6_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
+template<typename dst_t>
+static void dequantize_row_q6_K_cuda(const void * vx, dst_t * y, const int k, cudaStream_t stream) {
const int nb = k / QK_K;
#if QK_K == 256
dequantize_block_q6_K<<<nb, 64, 0, stream>>>(vx, y);
dequantize_block<1, 1, convert_f16><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
}
+static void convert_fp32_to_fp16_cuda(const void * vx, half * y, const int k, cudaStream_t stream) {
+ const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
+ dequantize_block<1, 1, convert_f32><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
+}
+
static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
}
+static to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
+ switch (type) {
+ case GGML_TYPE_Q4_0:
+ return dequantize_row_q4_0_cuda;
+ case GGML_TYPE_Q4_1:
+ return dequantize_row_q4_1_cuda;
+ case GGML_TYPE_Q5_0:
+ return dequantize_row_q5_0_cuda;
+ case GGML_TYPE_Q5_1:
+ return dequantize_row_q5_1_cuda;
+ case GGML_TYPE_Q8_0:
+ return dequantize_row_q8_0_cuda;
+ case GGML_TYPE_Q2_K:
+ return dequantize_row_q2_K_cuda;
+ case GGML_TYPE_Q3_K:
+ return dequantize_row_q3_K_cuda;
+ case GGML_TYPE_Q4_K:
+ return dequantize_row_q4_K_cuda;
+ case GGML_TYPE_Q5_K:
+ return dequantize_row_q5_K_cuda;
+ case GGML_TYPE_Q6_K:
+ return dequantize_row_q6_K_cuda;
+ case GGML_TYPE_F32:
+ return convert_fp32_to_fp16_cuda;
+ default:
+ return nullptr;
+ }
+}
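+// The dequantize_row_*_cuda helpers are now templated on dst_t, so the same
+// functions back both this fp16 table and the existing fp32 table below.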
+
static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
switch (type) {
case GGML_TYPE_Q4_0:
const int compute_capability = g_compute_capabilities[id];
int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_TURING) {
+ if (compute_capability >= CC_RDNA2) {
+ mmq_x = MMQ_X_Q4_0_RDNA2;
+ mmq_y = MMQ_Y_Q4_0_RDNA2;
+ nwarps = NWARPS_Q4_0_RDNA2;
+ } else if (compute_capability >= CC_OFFSET_AMD) {
+ mmq_x = MMQ_X_Q4_0_RDNA1;
+ mmq_y = MMQ_Y_Q4_0_RDNA1;
+ nwarps = NWARPS_Q4_0_RDNA1;
+ } else if (compute_capability >= CC_VOLTA) {
mmq_x = MMQ_X_Q4_0_AMPERE;
mmq_y = MMQ_Y_Q4_0_AMPERE;
nwarps = NWARPS_Q4_0_AMPERE;
const int compute_capability = g_compute_capabilities[id];
int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_TURING) {
+ if (compute_capability >= CC_RDNA2) {
+ mmq_x = MMQ_X_Q4_1_RDNA2;
+ mmq_y = MMQ_Y_Q4_1_RDNA2;
+ nwarps = NWARPS_Q4_1_RDNA2;
+ } else if (compute_capability >= CC_OFFSET_AMD) {
+ mmq_x = MMQ_X_Q4_1_RDNA1;
+ mmq_y = MMQ_Y_Q4_1_RDNA1;
+ nwarps = NWARPS_Q4_1_RDNA1;
+ } else if (compute_capability >= CC_VOLTA) {
mmq_x = MMQ_X_Q4_1_AMPERE;
mmq_y = MMQ_Y_Q4_1_AMPERE;
nwarps = NWARPS_Q4_1_AMPERE;
const int compute_capability = g_compute_capabilities[id];
int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_TURING) {
+ if (compute_capability >= CC_RDNA2) {
+ mmq_x = MMQ_X_Q5_0_RDNA2;
+ mmq_y = MMQ_Y_Q5_0_RDNA2;
+ nwarps = NWARPS_Q5_0_RDNA2;
+ } else if (compute_capability >= CC_OFFSET_AMD) {
+ mmq_x = MMQ_X_Q5_0_RDNA1;
+ mmq_y = MMQ_Y_Q5_0_RDNA1;
+ nwarps = NWARPS_Q5_0_RDNA1;
+ } else if (compute_capability >= CC_VOLTA) {
mmq_x = MMQ_X_Q5_0_AMPERE;
mmq_y = MMQ_Y_Q5_0_AMPERE;
nwarps = NWARPS_Q5_0_AMPERE;
const int compute_capability = g_compute_capabilities[id];
int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_TURING) {
+ if (compute_capability >= CC_RDNA2) {
+ mmq_x = MMQ_X_Q5_1_RDNA2;
+ mmq_y = MMQ_Y_Q5_1_RDNA2;
+ nwarps = NWARPS_Q5_1_RDNA2;
+ } else if (compute_capability >= CC_OFFSET_AMD) {
+ mmq_x = MMQ_X_Q5_1_RDNA1;
+ mmq_y = MMQ_Y_Q5_1_RDNA1;
+ nwarps = NWARPS_Q5_1_RDNA1;
+ } else if (compute_capability >= CC_VOLTA) {
mmq_x = MMQ_X_Q5_1_AMPERE;
mmq_y = MMQ_Y_Q5_1_AMPERE;
nwarps = NWARPS_Q5_1_AMPERE;
const int compute_capability = g_compute_capabilities[id];
int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_TURING) {
+ if (compute_capability >= CC_RDNA2) {
+ mmq_x = MMQ_X_Q8_0_RDNA2;
+ mmq_y = MMQ_Y_Q8_0_RDNA2;
+ nwarps = NWARPS_Q8_0_RDNA2;
+ } else if (compute_capability >= CC_OFFSET_AMD) {
+ mmq_x = MMQ_X_Q8_0_RDNA1;
+ mmq_y = MMQ_Y_Q8_0_RDNA1;
+ nwarps = NWARPS_Q8_0_RDNA1;
+ } else if (compute_capability >= CC_VOLTA) {
mmq_x = MMQ_X_Q8_0_AMPERE;
mmq_y = MMQ_Y_Q8_0_AMPERE;
nwarps = NWARPS_Q8_0_AMPERE;
const int compute_capability = g_compute_capabilities[id];
int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_TURING) {
+ if (compute_capability >= CC_RDNA2) {
+ mmq_x = MMQ_X_Q2_K_RDNA2;
+ mmq_y = MMQ_Y_Q2_K_RDNA2;
+ nwarps = NWARPS_Q2_K_RDNA2;
+ } else if (compute_capability >= CC_OFFSET_AMD) {
+ mmq_x = MMQ_X_Q2_K_RDNA1;
+ mmq_y = MMQ_Y_Q2_K_RDNA1;
+ nwarps = NWARPS_Q2_K_RDNA1;
+ } else if (compute_capability >= CC_VOLTA) {
mmq_x = MMQ_X_Q2_K_AMPERE;
mmq_y = MMQ_Y_Q2_K_AMPERE;
nwarps = NWARPS_Q2_K_AMPERE;
const int compute_capability = g_compute_capabilities[id];
int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_TURING) {
+ if (compute_capability >= CC_RDNA2) {
+ mmq_x = MMQ_X_Q3_K_RDNA2;
+ mmq_y = MMQ_Y_Q3_K_RDNA2;
+ nwarps = NWARPS_Q3_K_RDNA2;
+ } else if (compute_capability >= CC_OFFSET_AMD) {
+ mmq_x = MMQ_X_Q3_K_RDNA1;
+ mmq_y = MMQ_Y_Q3_K_RDNA1;
+ nwarps = NWARPS_Q3_K_RDNA1;
+ } else if (compute_capability >= CC_VOLTA) {
mmq_x = MMQ_X_Q3_K_AMPERE;
mmq_y = MMQ_Y_Q3_K_AMPERE;
nwarps = NWARPS_Q3_K_AMPERE;
const int compute_capability = g_compute_capabilities[id];
int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_TURING) {
+ if (compute_capability >= CC_RDNA2) {
+ mmq_x = MMQ_X_Q4_K_RDNA2;
+ mmq_y = MMQ_Y_Q4_K_RDNA2;
+ nwarps = NWARPS_Q4_K_RDNA2;
+ } else if (compute_capability >= CC_OFFSET_AMD) {
+ mmq_x = MMQ_X_Q4_K_RDNA1;
+ mmq_y = MMQ_Y_Q4_K_RDNA1;
+ nwarps = NWARPS_Q4_K_RDNA1;
+ } else if (compute_capability >= CC_VOLTA) {
mmq_x = MMQ_X_Q4_K_AMPERE;
mmq_y = MMQ_Y_Q4_K_AMPERE;
nwarps = NWARPS_Q4_K_AMPERE;
const int compute_capability = g_compute_capabilities[id];
int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_TURING) {
+ if (compute_capability >= CC_RDNA2) {
+ mmq_x = MMQ_X_Q5_K_RDNA2;
+ mmq_y = MMQ_Y_Q5_K_RDNA2;
+ nwarps = NWARPS_Q5_K_RDNA2;
+ } else if (compute_capability >= CC_OFFSET_AMD) {
+ mmq_x = MMQ_X_Q5_K_RDNA1;
+ mmq_y = MMQ_Y_Q5_K_RDNA1;
+ nwarps = NWARPS_Q5_K_RDNA1;
+ } else if (compute_capability >= CC_VOLTA) {
mmq_x = MMQ_X_Q5_K_AMPERE;
mmq_y = MMQ_Y_Q5_K_AMPERE;
nwarps = NWARPS_Q5_K_AMPERE;
const int compute_capability = g_compute_capabilities[id];
int mmq_x, mmq_y, nwarps;
- if (compute_capability >= CC_TURING) {
+ if (compute_capability >= CC_RDNA2) {
+ mmq_x = MMQ_X_Q6_K_RDNA2;
+ mmq_y = MMQ_Y_Q6_K_RDNA2;
+ nwarps = NWARPS_Q6_K_RDNA2;
+ } else if (compute_capability >= CC_OFFSET_AMD) {
+ mmq_x = MMQ_X_Q6_K_RDNA1;
+ mmq_y = MMQ_Y_Q6_K_RDNA1;
+ nwarps = NWARPS_Q6_K_RDNA1;
+ } else if (compute_capability >= CC_VOLTA) {
mmq_x = MMQ_X_Q6_K_AMPERE;
mmq_y = MMQ_Y_Q6_K_AMPERE;
nwarps = NWARPS_Q6_K_AMPERE;
scale_f32<<<num_blocks, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, k);
}
-static void rope_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0,
- const float p_delta, const int p_delta_rows, const float theta_scale, cudaStream_t stream) {
+static void clamp_f32_cuda(const float * x, float * dst, const float min, const float max, const int k, cudaStream_t stream) {
+ const int num_blocks = (k + CUDA_CLAMP_BLOCK_SIZE - 1) / CUDA_CLAMP_BLOCK_SIZE;
+ clamp_f32<<<num_blocks, CUDA_CLAMP_BLOCK_SIZE, 0, stream>>>(x, dst, min, max, k);
+}
+
+template<typename T>
+static void rope_cuda(
+ const T * x, T * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows,
+ float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream
+) {
GGML_ASSERT(ncols % 2 == 0);
const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1);
const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
const dim3 block_nums(nrows, num_blocks_x, 1);
- rope_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale);
+ if (pos == nullptr) {
+ rope<T, false><<<block_nums, block_dims, 0, stream>>>(
+ x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims
+ );
+ } else {
+ rope<T, true><<<block_nums, block_dims, 0, stream>>>(
+ x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims
+ );
+ }
}
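+// rope/rope_neox are specialized on has_pos at compile time, so the per-element
+// position lookup is free when no position tensor is provided.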
-static void rope_neox_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0,
- const float p_delta, const int p_delta_rows, const float theta_scale, cudaStream_t stream) {
+template<typename T>
+static void rope_neox_cuda(
+ const T * x, T * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows,
+ float freq_base, float ext_factor, float attn_factor, rope_corr_dims corr_dims, cudaStream_t stream
+) {
GGML_ASSERT(ncols % 2 == 0);
const dim3 block_dims(1, CUDA_ROPE_BLOCK_SIZE, 1);
const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
const dim3 block_nums(nrows, num_blocks_x, 1);
- rope_neox_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale);
+ if (pos == nullptr) {
+ rope_neox<T, false><<<block_nums, block_dims, 0, stream>>>(
+ x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims
+ );
+ } else {
+ rope_neox<T, true><<<block_nums, block_dims, 0, stream>>>(
+ x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, ext_factor, attn_factor, corr_dims
+ );
+ }
}
-static void rope_glm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0,
- const float p_delta, const int p_delta_rows, const float theta_scale, const int n_ctx, cudaStream_t stream) {
+static void rope_glm_f32_cuda(
+ const float * x, float * dst, int ncols, int nrows, const int32_t * pos, float freq_scale, int p_delta_rows,
+ float freq_base, int n_ctx, cudaStream_t stream
+) {
GGML_ASSERT(ncols % 4 == 0);
const dim3 block_dims(CUDA_ROPE_BLOCK_SIZE/4, 1, 1);
const int num_blocks_x = (ncols + CUDA_ROPE_BLOCK_SIZE - 1) / CUDA_ROPE_BLOCK_SIZE;
const dim3 block_nums(num_blocks_x, nrows, 1);
- rope_glm_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale, n_ctx);
+ rope_glm_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, pos, freq_scale, p_delta_rows, freq_base, n_ctx);
}
static void alibi_f32_cuda(const float * x, float * dst, const int ncols, const int nrows,
return ptr;
}
+static void * ggml_cuda_pool_malloc_async(size_t size, size_t * actual_size, int id, cudaStream_t stream) {
+ if (g_cudaMemPools[id] == nullptr) {
+ return ggml_cuda_pool_malloc(size, actual_size);
+ }
+ void * ptr;
+ CUDA_CHECK(cudaMallocFromPoolAsync(&ptr, size, g_cudaMemPools[id], stream));
+ *actual_size = size;
+ return ptr;
+}
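+// Stream-ordered allocation: when the device exposes a memory pool, allocation is
+// ordered on the given stream via cudaMallocFromPoolAsync, and the lock-protected
+// legacy pool is used only as a fallback.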
+
static void ggml_cuda_pool_free(void * ptr, size_t size) {
scoped_spin_lock lock(g_cuda_pool_lock);
int id;
}
+static void ggml_cuda_pool_free_async(void * ptr, size_t actual_size, int id, cudaStream_t stream) {
+ if (g_cudaMemPools[id] == nullptr) {
+ return ggml_cuda_pool_free(ptr, actual_size);
+ }
+ CUDA_CHECK(cudaFreeAsync(ptr, stream));
+}
+
void ggml_init_cublas() {
static bool initialized = false;
CUDA_CHECK(cudaGetDeviceCount(&g_device_count));
GGML_ASSERT(g_device_count <= GGML_CUDA_MAX_DEVICES);
int64_t total_vram = 0;
+#if defined(GGML_CUDA_FORCE_MMQ)
+ fprintf(stderr, "%s: GGML_CUDA_FORCE_MMQ: yes\n", __func__);
+#else
+ fprintf(stderr, "%s: GGML_CUDA_FORCE_MMQ: no\n", __func__);
+#endif
+#if defined(CUDA_USE_TENSOR_CORES)
+ fprintf(stderr, "%s: CUDA_USE_TENSOR_CORES: yes\n", __func__);
+#else
+ fprintf(stderr, "%s: CUDA_USE_TENSOR_CORES: no\n", __func__);
+#endif
fprintf(stderr, "%s: found %d " GGML_CUDA_NAME " devices:\n", __func__, g_device_count);
for (int id = 0; id < g_device_count; ++id) {
cudaDeviceProp prop;
g_tensor_split[id] = total_vram;
total_vram += prop.totalGlobalMem;
-
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+ g_compute_capabilities[id] = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD;
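+ // CC_OFFSET_AMD shifts AMD architectures above every NVIDIA compute capability,
+ // so capability checks (e.g. >= CC_RDNA2, >= CC_VOLTA) work on one integer scale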
+#else
g_compute_capabilities[id] = 100*prop.major + 10*prop.minor;
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
}
for (int id = 0; id < g_device_count; ++id) {
g_tensor_split[id] /= total_vram;
}
for (int id = 0; id < g_device_count; ++id) {
- CUDA_CHECK(cudaSetDevice(id));
+ CUDA_CHECK(ggml_cuda_set_device(id));
- // create main stream
- CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams_main[id], cudaStreamNonBlocking));
+ // create cuda streams
+ for (int is = 0; is < MAX_STREAMS; ++is) {
+ CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams[id][is], cudaStreamNonBlocking));
+ }
// create cublas handle
CUBLAS_CHECK(cublasCreate(&g_cublas_handles[id]));
CUBLAS_CHECK(cublasSetMathMode(g_cublas_handles[id], CUBLAS_TF32_TENSOR_OP_MATH));
+
+ // configure memory pool
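+ // (a UINT64_MAX release threshold keeps freed blocks cached in the pool rather
+ // than returned to the OS at synchronization points)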
+ cudaError_t err = cudaDeviceGetMemPool(&g_cudaMemPools[id], id);
+ if (err == cudaSuccess) {
+ size_t threshold = UINT64_MAX;
+ CUDA_CHECK(cudaMemPoolSetAttribute(g_cudaMemPools[id], cudaMemPoolAttrReleaseThreshold, &threshold));
+ }
}
// configure logging to stdout
if (src->backend == GGML_BACKEND_CPU) {
kind = cudaMemcpyHostToDevice;
src_ptr = (char *) src->data;
- } else if (src->backend == GGML_BACKEND_GPU) {
+ } else if (src->backend == GGML_BACKEND_GPU || src->backend == GGML_BACKEND_GPU_SPLIT) {
+ GGML_ASSERT(src->backend != GGML_BACKEND_GPU_SPLIT || (i1_low == 0 && i1_high == src->ne[1]));
kind = cudaMemcpyDeviceToDevice;
- struct ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src->extra;
+ ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src->extra;
int id;
CUDA_CHECK(cudaGetDevice(&id));
src_ptr = (char *) extra->data_device[id];
}
}
-inline void ggml_cuda_op_add(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
-
- GGML_ASSERT(src0_ddq_i != nullptr || src0_ddf_i != nullptr);
- GGML_ASSERT(src1_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
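+// The ggml_cuda_op_* functions below move to a simplified signature: each receives
+// whole contiguous device buffers for src0/src1/dst plus the stream to run on,
+// replacing the old per-row-slice parameters (i01_low/i01_high, i02, i1).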
+static void ggml_cuda_op_repeat(
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_d, const float * src1_d, float * dst_d, const cudaStream_t & stream) {
+ // guaranteed to be an integer due to the check in ggml_can_repeat
+ const int64_t ne0 = dst->ne[0];
+ const int64_t ne1 = dst->ne[1];
+ const int64_t ne2 = dst->ne[2];
+ const int64_t ne3 = dst->ne[3];
const int64_t ne00 = src0->ne[0];
- const int64_t i01_diff = i01_high - i01_low;
+ const int64_t ne01 = src0->ne[1];
+ const int64_t ne02 = src0->ne[2];
+ const int64_t ne03 = src0->ne[3];
+
+ const size_t nb0 = dst->nb[0];
+ const size_t nb1 = dst->nb[1];
+ const size_t nb2 = dst->nb[2];
+ const size_t nb3 = dst->nb[3];
+
+ const size_t nb00 = src0->nb[0];
+ const size_t nb01 = src0->nb[1];
+ const size_t nb02 = src0->nb[2];
+ const size_t nb03 = src0->nb[3];
+
+ const int nr0 = (int)(ne0/ne00);
+ const int nr1 = (int)(ne1/ne01);
+ const int nr2 = (int)(ne2/ne02);
+ const int nr3 = (int)(ne3/ne03);
+
+ // TODO: support for transposed / permuted tensors
+ GGML_ASSERT(nb0 == sizeof(float));
+ GGML_ASSERT(nb00 == sizeof(float));
+
+ // TODO: very inefficient, implement in a kernel, or fewer cudaMemcpyAsync calls for contiguous tensors
+ for (int i3 = 0; i3 < nr3; i3++) {
+ for (int k3 = 0; k3 < ne03; k3++) {
+ for (int i2 = 0; i2 < nr2; i2++) {
+ for (int k2 = 0; k2 < ne02; k2++) {
+ for (int i1 = 0; i1 < nr1; i1++) {
+ for (int k1 = 0; k1 < ne01; k1++) {
+ for (int i0 = 0; i0 < nr0; i0++) {
+ CUDA_CHECK(cudaMemcpyAsync(
+ (char *) dst_d + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0,
+ (const char *) src0_d + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01,
+ ne00*nb0, cudaMemcpyDeviceToDevice, stream));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ (void) src1;
+ (void) src1_d;
+}
+
+static void ggml_cuda_op_get_rows(
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_d, const float * src1_d, float * dst_d, const cudaStream_t & stream) {
+
+ GGML_ASSERT(src1->type == GGML_TYPE_I32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(ggml_is_contiguous(src0));
+ GGML_ASSERT(ggml_is_contiguous(src1));
+ GGML_ASSERT(ggml_is_contiguous(dst));
+
+ const int ncols = src0->ne[0];
+ const int nrows = ggml_nelements(src1);
+
+ const int32_t * src1_i32 = (const int32_t *) src1_d;
+
+ switch (src0->type) {
+ case GGML_TYPE_F16:
+ get_rows_cuda<1, 1, convert_f16>(src0_d, src1_i32, dst_d, nrows, ncols, stream);
+ break;
+ case GGML_TYPE_F32:
+ get_rows_cuda<1, 1, convert_f32>(src0_d, src1_i32, dst_d, nrows, ncols, stream);
+ break;
+ case GGML_TYPE_Q4_0:
+ get_rows_cuda<QK4_0, QR4_0, dequantize_q4_0>(src0_d, src1_i32, dst_d, nrows, ncols, stream);
+ break;
+ case GGML_TYPE_Q4_1:
+ get_rows_cuda<QK4_1, QR4_1, dequantize_q4_1>(src0_d, src1_i32, dst_d, nrows, ncols, stream);
+ break;
+ case GGML_TYPE_Q5_0:
+ get_rows_cuda<QK5_0, QR5_0, dequantize_q5_0>(src0_d, src1_i32, dst_d, nrows, ncols, stream);
+ break;
+ case GGML_TYPE_Q5_1:
+ get_rows_cuda<QK5_1, QR5_1, dequantize_q5_1>(src0_d, src1_i32, dst_d, nrows, ncols, stream);
+ break;
+ case GGML_TYPE_Q8_0:
+ get_rows_cuda<QK8_0, QR8_0, dequantize_q8_0>(src0_d, src1_i32, dst_d, nrows, ncols, stream);
+ break;
+ default:
+ // TODO: k-quants
+ GGML_ASSERT(false);
+ break;
+ }
+}
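+// F16/F32 sources reuse the generic quantized path with qk == qr == 1, where the
+// block/quant indexing degenerates into a plain per-element convert-and-copy.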
+
+inline void ggml_cuda_op_add(
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
+
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
const int64_t ne10 = src1->ne[0];
const int64_t ne11 = src1->ne[1];
- // compute
if (src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
- add_f32_cuda(src0_ddf_i, src1_ddf_i, dst_ddf_i, ne00*i01_diff, ne10*ne11, cudaStream_main);
+ add_f32_cuda(src0_dd, src1_dd, dst_dd, ggml_nelements(src0), ne10*ne11, main_stream);
} else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
- add_f16_f32_f16_cuda((half *) src0_ddq_i, src1_ddf_i, (half *) dst_ddf_i, ne00*i01_diff, cudaStream_main);
+ add_f16_f32_f16_cuda((const half *) src0_dd, src1_dd, (half *) dst_dd, ggml_nelements(src0), main_stream);
+ } else if (src0->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
+ add_f16_f32_f32_cuda((const half *) src0_dd, src1_dd, dst_dd, ggml_nelements(src0), main_stream);
} else {
+ fprintf(stderr, "src0->type: %d dst->type: %d\n", src0->type, dst->type);
GGML_ASSERT(false);
}
(void) src1;
(void) dst;
- (void) src0_ddq_i;
- (void) i02;
- (void) i1;
}
inline void ggml_cuda_op_mul(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
- GGML_ASSERT(src0_ddf_i != nullptr);
- GGML_ASSERT(src1_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
-
- const int64_t ne00 = src0->ne[0];
- const int64_t i01_diff = i01_high - i01_low;
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
const int64_t ne10 = src1->ne[0];
const int64_t ne11 = src1->ne[1];
- mul_f32_cuda(src0_ddf_i, src1_ddf_i, dst_ddf_i, ne00*i01_diff, ne10*ne11, cudaStream_main);
+ mul_f32_cuda(src0_dd, src1_dd, dst_dd, ggml_nelements(src0), ne10*ne11, main_stream);
(void) dst;
- (void) src0_ddq_i;
- (void) i02;
- (void) i1;
}
inline void ggml_cuda_op_gelu(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
- GGML_ASSERT(src0_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
- const int64_t ne00 = src0->ne[0];
- const int64_t i01_diff = i01_high - i01_low;
-
- // compute
- gelu_f32_cuda(src0_ddf_i, dst_ddf_i, ne00*i01_diff, cudaStream_main);
+ gelu_f32_cuda(src0_dd, dst_dd, ggml_nelements(src0), main_stream);
(void) src1;
(void) dst;
- (void) src0_ddq_i;
- (void) src1_ddf_i;
- (void) i02;
- (void) i1;
+ (void) src1_dd;
}
inline void ggml_cuda_op_silu(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
- GGML_ASSERT(src0_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
- const int64_t ne00 = src0->ne[0];
- const int64_t i01_diff = i01_high - i01_low;
-
- // compute
- silu_f32_cuda(src0_ddf_i, dst_ddf_i, ne00*i01_diff, cudaStream_main);
+ silu_f32_cuda(src0_dd, dst_dd, ggml_nelements(src0), main_stream);
(void) src1;
(void) dst;
- (void) src0_ddq_i;
- (void) src1_ddf_i;
- (void) i02;
- (void) i1;
+ (void) src1_dd;
}
inline void ggml_cuda_op_norm(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
- GGML_ASSERT(src0_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
const int64_t ne00 = src0->ne[0];
- const int64_t i01_diff = i01_high - i01_low;
+ const int64_t nrows = ggml_nrows(src0);
- // compute
- norm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
+ norm_f32_cuda(src0_dd, dst_dd, ne00, nrows, main_stream);
(void) src1;
(void) dst;
- (void) src0_ddq_i;
- (void) src1_ddf_i;
- (void) i02;
- (void) i1;
+ (void) src1_dd;
}
inline void ggml_cuda_op_rms_norm(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
- GGML_ASSERT(src0_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
const int64_t ne00 = src0->ne[0];
- const int64_t i01_diff = i01_high - i01_low;
+ const int64_t nrows = ggml_nrows(src0);
float eps;
memcpy(&eps, dst->op_params, sizeof(float));
- // compute
- rms_norm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, eps, cudaStream_main);
+ rms_norm_f32_cuda(src0_dd, dst_dd, ne00, nrows, eps, main_stream);
(void) src1;
(void) dst;
- (void) src0_ddq_i;
- (void) src1_ddf_i;
- (void) i02;
- (void) i1;
+ (void) src1_dd;
}
inline void ggml_cuda_op_mul_mat_q(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
-
- GGML_ASSERT(src0_ddq_i != nullptr);
- GGML_ASSERT(src1_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
+ const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
+ const int64_t src1_padded_row_size, const cudaStream_t & stream) {
const int64_t ne00 = src0->ne[0];
const int64_t ne10 = src1->ne[0];
- const int64_t ne11 = src1->ne[1];
GGML_ASSERT(ne10 % QK8_1 == 0);
const int64_t ne0 = dst->ne[0];
- const int64_t i01_diff = i01_high - i01_low;
+ const int64_t row_diff = row_high - row_low;
int id;
CUDA_CHECK(cudaGetDevice(&id));
// the main device has a larger memory buffer to hold the results from all GPUs
-    // nrows_dst == nrows of the matrix that the dequantize_mul_mat kernel writes into
+    // nrows_dst == nrows of the matrix that the mul_mat_q kernel writes into
- const int64_t nrows_dst = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? ne0 : i01_diff;
-
- const int64_t padded_row_size = ne10 % MATRIX_ROW_PADDING == 0 ?
- ne10 : ne10 - ne10 % MATRIX_ROW_PADDING + MATRIX_ROW_PADDING;
- size_t as;
- void * src1_q8_1 = ggml_cuda_pool_malloc(padded_row_size*ne11*sizeof(block_q8_1)/QK8_1, &as);
- quantize_row_q8_1_cuda(src1_ddf_i, src1_q8_1, ne10, ne11, padded_row_size, cudaStream_main);
+ const int64_t nrows_dst = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? ne0 : row_diff;
switch (src0->type) {
case GGML_TYPE_Q4_0:
- ggml_mul_mat_q4_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
+ ggml_mul_mat_q4_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
break;
case GGML_TYPE_Q4_1:
- ggml_mul_mat_q4_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
+ ggml_mul_mat_q4_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
break;
case GGML_TYPE_Q5_0:
- ggml_mul_mat_q5_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
+ ggml_mul_mat_q5_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
break;
case GGML_TYPE_Q5_1:
- ggml_mul_mat_q5_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
+ ggml_mul_mat_q5_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
break;
case GGML_TYPE_Q8_0:
- ggml_mul_mat_q8_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
+ ggml_mul_mat_q8_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
break;
case GGML_TYPE_Q2_K:
- ggml_mul_mat_q2_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
+ ggml_mul_mat_q2_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
break;
case GGML_TYPE_Q3_K:
- ggml_mul_mat_q3_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
+ ggml_mul_mat_q3_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
break;
case GGML_TYPE_Q4_K:
- ggml_mul_mat_q4_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
+ ggml_mul_mat_q4_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
break;
case GGML_TYPE_Q5_K:
- ggml_mul_mat_q5_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
+ ggml_mul_mat_q5_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
break;
case GGML_TYPE_Q6_K:
- ggml_mul_mat_q6_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, i01_diff, ne11, padded_row_size, nrows_dst, cudaStream_main);
+ ggml_mul_mat_q6_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
break;
default:
GGML_ASSERT(false);
break;
}
- ggml_cuda_pool_free(src1_q8_1, as);
-
(void) src1;
(void) dst;
- (void) src0_ddf_i;
- (void) i02;
- (void) i1;
+ (void) src1_ddf_i;
}
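+// rounding (in rows) to use when splitting src0 across the GPUs:
+// the row boundaries have to be aligned to the mul_mat_q tile sizes of the given type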
static int64_t get_row_rounding(ggml_type type) {
- int max_compute_capability = INT_MIN;
- for (int id = 0; id < g_device_count; ++id) {
- if (max_compute_capability < g_compute_capabilities[id]
- && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
- max_compute_capability = g_compute_capabilities[id];
+ int64_t min_compute_capability = INT_MAX;
+ int64_t max_compute_capability = INT_MIN;
+ for (int64_t id = 0; id < g_device_count; ++id) {
+ if (g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
+ if (min_compute_capability > g_compute_capabilities[id]) {
+ min_compute_capability = g_compute_capabilities[id];
+ }
+ if (max_compute_capability < g_compute_capabilities[id]) {
+ max_compute_capability = g_compute_capabilities[id];
+ }
}
}
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+ switch(type) {
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ return max_compute_capability >= CC_RDNA2 ? 128 : 64;
+ case GGML_TYPE_F16:
+ return 1;
+ case GGML_TYPE_Q2_K:
+ return max_compute_capability >= CC_RDNA2 ? 128 : 32;
+ case GGML_TYPE_Q3_K:
+ return min_compute_capability < CC_RDNA2 ? 128 : 64;
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_Q6_K:
+ return max_compute_capability >= CC_RDNA2 ? 128 : 64;
+ default:
+ GGML_ASSERT(false);
+ }
+#else
switch(type) {
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
- return max_compute_capability >= CC_TURING ? 128 : 64;
+ return max_compute_capability >= CC_VOLTA ? 128 : 64;
case GGML_TYPE_Q5_0:
case GGML_TYPE_Q5_1:
case GGML_TYPE_Q8_0:
case GGML_TYPE_Q3_K:
case GGML_TYPE_Q4_K:
case GGML_TYPE_Q5_K:
- return max_compute_capability >= CC_TURING ? 128 : 64;
+ return max_compute_capability >= CC_VOLTA ? 128 : 64;
case GGML_TYPE_Q6_K:
return 64;
default:
GGML_ASSERT(false);
}
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
}
-inline void ggml_cuda_op_mul_mat_vec(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
-
- GGML_ASSERT(src0_ddq_i != nullptr);
- GGML_ASSERT(src1_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
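+// mat-vec multiplication for quantized src0; src1 is expected to have already
+// been quantized to q8_1 by the caller, so this only dispatches on src0->type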
+inline void ggml_cuda_op_mul_mat_vec_q(
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
+ const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
+ const int64_t src1_padded_row_size, const cudaStream_t & stream) {
const int64_t ne00 = src0->ne[0];
- const int64_t nrows = i01_high - i01_low;
+ const int64_t row_diff = row_high - row_low;
-#ifdef GGML_CUDA_FORCE_DMMV
- const bool use_mul_mat_vec_q = false;
- (void) g_compute_capabilities[0];
-#else
- int id;
- CUDA_CHECK(cudaGetDevice(&id));
+ switch (src0->type) {
+ case GGML_TYPE_Q4_0:
+ mul_mat_vec_q4_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q4_1:
+ mul_mat_vec_q4_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q5_0:
+ mul_mat_vec_q5_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q5_1:
+ mul_mat_vec_q5_1_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q8_0:
+ mul_mat_vec_q8_0_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q2_K:
+ mul_mat_vec_q2_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q3_K:
+ mul_mat_vec_q3_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q4_K:
+ mul_mat_vec_q4_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q5_K:
+ mul_mat_vec_q5_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q6_K:
+ mul_mat_vec_q6_K_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ default:
+ GGML_ASSERT(false);
+ break;
+ }
- bool mul_mat_vec_q_implemented =
- src0->type == GGML_TYPE_Q4_0 ||
- src0->type == GGML_TYPE_Q4_1 ||
- src0->type == GGML_TYPE_Q5_0 ||
- src0->type == GGML_TYPE_Q5_1 ||
- src0->type == GGML_TYPE_Q8_0;
-#if QK_K == 256
- mul_mat_vec_q_implemented = mul_mat_vec_q_implemented ||
- src0->type == GGML_TYPE_Q2_K ||
- src0->type == GGML_TYPE_Q3_K ||
- src0->type == GGML_TYPE_Q4_K ||
- src0->type == GGML_TYPE_Q5_K ||
- src0->type == GGML_TYPE_Q6_K;
-#endif // QK_K == 256
-
- const bool use_mul_mat_vec_q = g_compute_capabilities[id] >= MIN_CC_DP4A && mul_mat_vec_q_implemented;
-#endif
+ (void) src1;
+ (void) dst;
+ (void) src1_ddf_i;
+ (void) src1_ncols;
+ (void) src1_padded_row_size;
+}
- if (use_mul_mat_vec_q) {
- const int64_t padded_row_size = ne00 % MATRIX_ROW_PADDING == 0 ?
- ne00 : ne00 - ne00 % MATRIX_ROW_PADDING + MATRIX_ROW_PADDING;
- size_t as;
- void * src1_q8_1 = ggml_cuda_pool_malloc(padded_row_size*sizeof(block_q8_1)/QK8_1, &as);
- quantize_row_q8_1_cuda(src1_ddf_i, src1_q8_1, ne00, 1, padded_row_size, cudaStream_main);
-
- switch (src0->type) {
- case GGML_TYPE_Q4_0:
- mul_mat_vec_q4_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q4_1:
- mul_mat_vec_q4_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q5_0:
- mul_mat_vec_q5_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q5_1:
- mul_mat_vec_q5_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q8_0:
- mul_mat_vec_q8_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q2_K:
- mul_mat_vec_q2_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q3_K:
- mul_mat_vec_q3_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q4_K:
- mul_mat_vec_q4_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q5_K:
- mul_mat_vec_q5_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q6_K:
- mul_mat_vec_q6_K_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- default:
- GGML_ASSERT(false);
- break;
- }
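+// mat-vec multiplication that dequantizes the src0 blocks on the fly; src1 is
+// consumed as f32 (or converted to f16 first when GGML_CUDA_F16 is enabled)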
+inline void ggml_cuda_op_dequantize_mul_mat_vec(
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
+ const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
+ const int64_t src1_padded_row_size, const cudaStream_t & stream) {
- ggml_cuda_pool_free(src1_q8_1, as);
- } else {
- // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
+ const int64_t ne00 = src0->ne[0];
+ const int64_t row_diff = row_high - row_low;
+
+ // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
#ifdef GGML_CUDA_F16
- size_t ash;
- dfloat * src1_dfloat = nullptr; // dfloat == half
-
- bool src1_convert_f16 = src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 ||
- src0->type == GGML_TYPE_Q5_0 || src0->type == GGML_TYPE_Q5_1 ||
- src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16;
-
- if (src1_convert_f16) {
- src1_dfloat = (half *) ggml_cuda_pool_malloc(ne00*sizeof(half), &ash);
- ggml_cpy_f32_f16_cuda((char *) src1_ddf_i, (char *) src1_dfloat, ne00,
- ne00, 1, sizeof(float), 0, 0,
- ne00, 1, sizeof(half), 0, 0, cudaStream_main);
- }
+ size_t ash;
+ dfloat * src1_dfloat = nullptr; // dfloat == half
+
+ bool src1_convert_f16 = src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 ||
+ src0->type == GGML_TYPE_Q5_0 || src0->type == GGML_TYPE_Q5_1 ||
+ src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16;
+
+ if (src1_convert_f16) {
+ src1_dfloat = (half *) ggml_cuda_pool_malloc(ne00*sizeof(half), &ash);
+ ggml_cpy_f32_f16_cuda((const char *) src1_ddf_i, (char *) src1_dfloat, ne00,
+ ne00, 1, sizeof(float), 0, 0,
+ ne00, 1, sizeof(half), 0, 0, stream);
+ }
#else
- dfloat * src1_dfloat = src1_ddf_i; // dfloat == float, no conversion
+ const dfloat * src1_dfloat = (const dfloat *) src1_ddf_i; // dfloat == float, no conversion
#endif // GGML_CUDA_F16
- switch (src0->type) {
- case GGML_TYPE_Q4_0:
- dequantize_mul_mat_vec_q4_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q4_1:
- dequantize_mul_mat_vec_q4_1_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q5_0:
- dequantize_mul_mat_vec_q5_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q5_1:
- dequantize_mul_mat_vec_q5_1_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q8_0:
- dequantize_mul_mat_vec_q8_0_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q2_K:
- dequantize_mul_mat_vec_q2_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q3_K:
- dequantize_mul_mat_vec_q3_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q4_K:
- dequantize_mul_mat_vec_q4_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q5_K:
- dequantize_mul_mat_vec_q5_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_Q6_K:
- dequantize_mul_mat_vec_q6_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- case GGML_TYPE_F16:
- convert_mul_mat_vec_f16_cuda(src0_ddq_i, src1_dfloat, dst_ddf_i, ne00, nrows, cudaStream_main);
- break;
- default:
- GGML_ASSERT(false);
- break;
- }
+ switch (src0->type) {
+ case GGML_TYPE_Q4_0:
+ dequantize_mul_mat_vec_q4_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q4_1:
+ dequantize_mul_mat_vec_q4_1_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q5_0:
+ dequantize_mul_mat_vec_q5_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q5_1:
+ dequantize_mul_mat_vec_q5_1_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q8_0:
+ dequantize_mul_mat_vec_q8_0_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q2_K:
+ dequantize_mul_mat_vec_q2_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q3_K:
+ dequantize_mul_mat_vec_q3_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q4_K:
+ dequantize_mul_mat_vec_q4_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q5_K:
+ dequantize_mul_mat_vec_q5_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q6_K:
+ dequantize_mul_mat_vec_q6_K_cuda(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_F16:
+ convert_mul_mat_vec_f16_cuda(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ break;
+ default:
+ GGML_ASSERT(false);
+ break;
+ }
#ifdef GGML_CUDA_F16
- if (src1_convert_f16) {
- ggml_cuda_pool_free(src1_dfloat, ash);
- }
-#endif // GGML_CUDA_F16
+ if (src1_convert_f16) {
+ ggml_cuda_pool_free(src1_dfloat, ash);
}
+#endif // GGML_CUDA_F16
(void) src1;
(void) dst;
- (void) src0_ddf_i;
- (void) i02;
- (void) i1;
+ (void) src1_ddq_i;
+ (void) src1_ncols;
+ (void) src1_padded_row_size;
}
inline void ggml_cuda_op_mul_mat_cublas(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
+ const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
+ const int64_t src1_padded_row_size, const cudaStream_t & stream) {
- GGML_ASSERT(src0_ddf_i != nullptr);
+ GGML_ASSERT(src0_dd_i != nullptr);
GGML_ASSERT(src1_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
-
- const float alpha = 1.0f;
- const float beta = 0.0f;
+ GGML_ASSERT(dst_dd_i != nullptr);
const int64_t ne00 = src0->ne[0];
-
const int64_t ne10 = src1->ne[0];
- const int64_t ne11 = src1->ne[1];
const int64_t ne0 = dst->ne[0];
- const int64_t i01_diff = i01_high - i01_low;
+
+ const int64_t row_diff = row_high - row_low;
int id;
CUDA_CHECK(cudaGetDevice(&id));
// the main device has a larger memory buffer to hold the results from all GPUs
// ldc == nrows of the matrix that cuBLAS writes into
- int ldc = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? ne0 : i01_diff;
+ int ldc = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? ne0 : row_diff;
+
+ const int compute_capability = g_compute_capabilities[id];
- CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], cudaStream_main));
- CUBLAS_CHECK(
- cublasSgemm(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
- i01_diff, ne11, ne10,
- &alpha, src0_ddf_i, ne00,
- src1_ddf_i, ne10,
- &beta, dst_ddf_i, ldc));
+ if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) {
+ // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32
+ half * src0_as_f16 = nullptr;
+ size_t src0_as = 0;
+ if (src0->type != GGML_TYPE_F16) {
+ const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src0->type);
+ GGML_ASSERT(to_fp16_cuda != nullptr);
+ size_t ne = row_diff*ne00;
+ src0_as_f16 = (half *) ggml_cuda_pool_malloc_async(ne * sizeof(half), &src0_as, id, stream);
+ to_fp16_cuda(src0_dd_i, src0_as_f16, ne, stream);
+ }
+ const half * src0_ptr = src0->type == GGML_TYPE_F16 ? (const half *) src0_dd_i : src0_as_f16;
+
+ half * src1_as_f16 = nullptr;
+ size_t src1_as = 0;
+ if (src1->type != GGML_TYPE_F16) {
+ const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type);
+ GGML_ASSERT(to_fp16_cuda != nullptr);
+ size_t ne = src1_ncols*ne10;
+ src1_as_f16 = (half *) ggml_cuda_pool_malloc_async(ne * sizeof(half), &src1_as, id, stream);
+ to_fp16_cuda(src1_ddf_i, src1_as_f16, ne, stream);
+ }
+ const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddq_i : src1_as_f16;
+ size_t dst_f16_as = 0;
+ half * dst_f16 = (half *) ggml_cuda_pool_malloc_async(row_diff*src1_ncols * sizeof(half), &dst_f16_as, id, stream);
+
+ const half alpha_f16 = 1.0f;
+ const half beta_f16 = 0.0f;
+
+ CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream));
+ CUBLAS_CHECK(
+ cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
+ row_diff, src1_ncols, ne10,
+ &alpha_f16, src0_ptr, CUDA_R_16F, ne00,
+ src1_ptr, CUDA_R_16F, ne10,
+ &beta_f16, dst_f16, CUDA_R_16F, ldc,
+ CUBLAS_COMPUTE_16F,
+ CUBLAS_GEMM_DEFAULT_TENSOR_OP));
+
+ const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
+ to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream);
+
+ if (dst_f16_as != 0) {
+ ggml_cuda_pool_free_async(dst_f16, dst_f16_as, id, stream);
+ }
+
+ if (src0_as != 0) {
+ ggml_cuda_pool_free_async(src0_as_f16, src0_as, id, stream);
+ }
+ if (src1_as != 0) {
+ ggml_cuda_pool_free_async(src1_as_f16, src1_as, id, stream);
+ }
+    } else {
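+        // fall back to fp32 cuBLAS sgemm, converting src0 to f32 first if necessary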
+ float * src0_ddq_as_f32 = nullptr;
+ size_t src0_as = 0;
+
+ if (src0->type != GGML_TYPE_F32) {
+ const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type);
+ GGML_ASSERT(to_fp32_cuda != nullptr);
+ src0_ddq_as_f32 = (float *) ggml_cuda_pool_malloc_async(row_diff*ne00 * sizeof(float), &src0_as, id, stream); // NOLINT
+ to_fp32_cuda(src0_dd_i, src0_ddq_as_f32, row_diff*ne00, stream);
+ }
+ const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32;
+
+ const float alpha = 1.0f;
+ const float beta = 0.0f;
+
+ CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], stream));
+ CUBLAS_CHECK(
+ cublasSgemm(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
+ row_diff, src1_ncols, ne10,
+ &alpha, src0_ddf_i, ne00,
+ src1_ddf_i, ne10,
+ &beta, dst_dd_i, ldc));
+
+ if (src0_as != 0) {
+ ggml_cuda_pool_free_async(src0_ddq_as_f32, src0_as, id, stream);
+ }
+ }
(void) dst;
- (void) src0_ddq_i;
- (void) i02;
- (void) i1;
+ (void) src1_ddq_i;
+ (void) src1_padded_row_size;
}
inline void ggml_cuda_op_rope(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
- GGML_ASSERT(src0_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
+ GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+ GGML_ASSERT(src0->type == dst->type);
const int64_t ne00 = src0->ne[0];
const int64_t ne01 = src0->ne[1];
- const int64_t i01_diff = i01_high - i01_low;
+ const int64_t ne2 = dst->ne[2];
+ const int64_t nrows = ggml_nrows(src0);
- const int n_past = ((int32_t *) dst->op_params)[0];
- const int n_dims = ((int32_t *) dst->op_params)[1];
- const int mode = ((int32_t *) dst->op_params)[2];
- const int n_ctx = ((int32_t *) dst->op_params)[3];
- // RoPE alteration for extended context
+ //const int n_past = ((int32_t *) dst->op_params)[0];
+ const int n_dims = ((int32_t *) dst->op_params)[1];
+ const int mode = ((int32_t *) dst->op_params)[2];
+ const int n_ctx = ((int32_t *) dst->op_params)[3];
+ const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
- float freq_base, freq_scale;
- memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float));
- memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
+ // RoPE alteration for extended context
+ float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
+ memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
+ memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
+ memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
+ memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
+ memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
+ memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
- const float theta_scale = powf(freq_base, -2.0f/n_dims);
- const float p0 = (((mode & 1) == 0 ? n_past : 0)) * freq_scale;
+ const int32_t * pos = nullptr;
+ if ((mode & 1) == 0) {
+ GGML_ASSERT(src1->type == GGML_TYPE_I32);
+ GGML_ASSERT(src1->ne[0] == ne2);
+ pos = (const int32_t *) src1_dd;
+ }
const bool is_neox = mode & 2;
const bool is_glm = mode & 4;
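+    // YaRN correction dims, derived from beta_fast/beta_slow and the original context size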
+ rope_corr_dims corr_dims;
+ ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims.v);
+
// compute
if (is_glm) {
- rope_glm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p0, freq_scale, ne01, theta_scale, n_ctx, cudaStream_main);
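+        // the GLM branch is currently disabled in this path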
+ GGML_ASSERT(false);
+ rope_glm_f32_cuda(src0_dd, dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, n_ctx, main_stream);
} else if (is_neox) {
GGML_ASSERT(ne00 == n_dims && "ne00 != n_dims is not implemented for CUDA yet");
- rope_neox_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p0, freq_scale, ne01, theta_scale, cudaStream_main);
+ if (src0->type == GGML_TYPE_F32) {
+ rope_neox_cuda(
+ (const float *)src0_dd, (float *)dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor,
+ attn_factor, corr_dims, main_stream
+ );
+ } else if (src0->type == GGML_TYPE_F16) {
+ rope_neox_cuda(
+ (const half *)src0_dd, (half *)dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor,
+ attn_factor, corr_dims, main_stream
+ );
+ } else {
+ GGML_ASSERT(false);
+ }
} else {
- rope_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p0, freq_scale, ne01, theta_scale, cudaStream_main);
+ if (src0->type == GGML_TYPE_F32) {
+ rope_cuda(
+ (const float *)src0_dd, (float *)dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor,
+ attn_factor, corr_dims, main_stream
+ );
+ } else if (src0->type == GGML_TYPE_F16) {
+ rope_cuda(
+ (const half *)src0_dd, (half *)dst_dd, ne00, nrows, pos, freq_scale, ne01, freq_base, ext_factor,
+ attn_factor, corr_dims, main_stream
+ );
+ } else {
+ GGML_ASSERT(false);
+ }
}
(void) src1;
(void) dst;
- (void) src0_ddq_i;
- (void) src1_ddf_i;
- (void) i02;
- (void) i1;
+ (void) src1_dd;
}
inline void ggml_cuda_op_alibi(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
- GGML_ASSERT(src0_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
const int64_t ne00 = src0->ne[0];
const int64_t ne01 = src0->ne[1];
const int64_t ne02 = src0->ne[2];
- const int64_t i01_diff = i01_high - i01_low;
+ const int64_t nrows = ggml_nrows(src0);
- const int n_past = ((int32_t *) dst->op_params)[0];
+ //const int n_past = ((int32_t *) dst->op_params)[0];
const int n_head = ((int32_t *) dst->op_params)[1];
float max_bias;
memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
- GGML_ASSERT(ne01 + n_past == ne00);
+ //GGML_ASSERT(ne01 + n_past == ne00);
GGML_ASSERT(n_head == ne02);
const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
- // compute
- alibi_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, ne01, n_heads_log2_floor, m0, m1, cudaStream_main);
+ alibi_f32_cuda(src0_dd, dst_dd, ne00, nrows, ne01, n_heads_log2_floor, m0, m1, main_stream);
(void) src1;
- (void) src0_ddq_i;
- (void) src1_ddf_i;
- (void) i02;
- (void) i1;
+ (void) src1_dd;
}
inline void ggml_cuda_op_diag_mask_inf(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
- GGML_ASSERT(src0_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
const int64_t ne00 = src0->ne[0];
const int64_t ne01 = src0->ne[1];
- const int64_t i01_diff = i01_high - i01_low;
+ const int nrows0 = ggml_nrows(src0);
const int n_past = ((int32_t *) dst->op_params)[0];
- // compute
- diag_mask_inf_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, ne01, n_past, cudaStream_main);
+ diag_mask_inf_f32_cuda(src0_dd, dst_dd, ne00, nrows0, ne01, n_past, main_stream);
(void) src1;
(void) dst;
- (void) src0_ddq_i;
- (void) src1_ddf_i;
- (void) i02;
- (void) i1;
+ (void) src1_dd;
}
inline void ggml_cuda_op_soft_max(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
- GGML_ASSERT(src0_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
const int64_t ne00 = src0->ne[0];
- const int64_t i01_diff = i01_high - i01_low;
+ const int64_t nrows = ggml_nrows(src0);
- // compute
- soft_max_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
+ soft_max_f32_cuda(src0_dd, dst_dd, ne00, nrows, main_stream);
(void) src1;
(void) dst;
- (void) src0_ddq_i;
- (void) src1_ddf_i;
- (void) i02;
- (void) i1;
+ (void) src1_dd;
}
inline void ggml_cuda_op_scale(
- const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
- float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
- cudaStream_t & cudaStream_main){
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
- GGML_ASSERT(src0_ddf_i != nullptr);
- GGML_ASSERT(dst_ddf_i != nullptr);
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
- const float scale = ((float *) src1->data)[0];
+ float scale;
+ // HACK: support for ggml backend interface
+ if (src1->backend == GGML_BACKEND_CPU) {
+ scale = ((float *) src1->data)[0];
+ } else {
+ // TODO: pass pointer to kernel instead of copying to host
+ CUDA_CHECK(cudaMemcpy(&scale, src1->data, sizeof(float), cudaMemcpyDeviceToHost));
+ }
- const int64_t ne00 = src0->ne[0];
- const int64_t i01_diff = i01_high - i01_low;
+ scale_f32_cuda(src0_dd, dst_dd, scale, ggml_nelements(src0), main_stream);
+ CUDA_CHECK(cudaGetLastError());
- // compute
- scale_f32_cuda(src0_ddf_i, dst_ddf_i, scale, ne00*i01_diff, cudaStream_main);
+ (void) src1;
+ (void) dst;
+ (void) src1_dd;
+}
+
+inline void ggml_cuda_op_clamp(
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+ const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+ float min;
+ float max;
+ memcpy(&min, dst->op_params, sizeof(float));
+ memcpy(&max, (float *) dst->op_params + 1, sizeof(float));
+
+ clamp_f32_cuda(src0_dd, dst_dd, min, max, ggml_nelements(src0), main_stream);
CUDA_CHECK(cudaGetLastError());
(void) src1;
(void) dst;
- (void) src0_ddq_i;
- (void) src1_ddf_i;
- (void) i02;
- (void) i1;
+ (void) src1_dd;
+}
+
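+// run an op on the main device with the tensors fully "flattened":
+// src0/src1 are uploaded from the host if necessary and dst is copied back afterwards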
+static void ggml_cuda_op_flatten(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const ggml_cuda_op_flatten_t op) {
+ const int64_t nrows0 = ggml_nrows(src0);
+
+ const bool use_src1 = src1 != nullptr;
+ const int64_t nrows1 = use_src1 ? ggml_nrows(src1) : 1;
+
+ GGML_ASSERT(!use_src1 || src1->backend != GGML_BACKEND_GPU_SPLIT);
+ GGML_ASSERT( dst->backend != GGML_BACKEND_GPU_SPLIT);
+
+ ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
+ ggml_tensor_extra_gpu * src1_extra = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
+ ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
+
+ const bool src0_on_device = src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT;
+ const bool src1_on_device = use_src1 && src1->backend == GGML_BACKEND_GPU;
+ const bool dst_on_device = dst->backend == GGML_BACKEND_GPU;
+
+ const bool src1_stays_on_host = use_src1 && dst->op == GGML_OP_SCALE;
+
+ // dd = data device
+ float * src0_ddf = nullptr;
+ float * src1_ddf = nullptr;
+ float * dst_ddf = nullptr;
+
+ // as = actual size
+ size_t src0_asf = 0;
+ size_t src1_asf = 0;
+ size_t dst_asf = 0;
+
+ ggml_cuda_set_device(g_main_device);
+ const cudaStream_t main_stream = g_cudaStreams[g_main_device][0];
+
+ if (src0_on_device) {
+ src0_ddf = (float *) src0_extra->data_device[g_main_device];
+ } else {
+ src0_ddf = (float *) ggml_cuda_pool_malloc(ggml_nbytes(src0), &src0_asf);
+ CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_ddf, src0, 0, 0, 0, nrows0, main_stream));
+ }
+
+ if (use_src1 && !src1_stays_on_host) {
+ if (src1_on_device) {
+ src1_ddf = (float *) src1_extra->data_device[g_main_device];
+ } else {
+ src1_ddf = (float *) ggml_cuda_pool_malloc(ggml_nbytes(src1), &src1_asf);
+ CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf, src1, 0, 0, 0, nrows1, main_stream));
+ }
+ }
+ if (dst_on_device) {
+ dst_ddf = (float *) dst_extra->data_device[g_main_device];
+ } else {
+ dst_ddf = (float *) ggml_cuda_pool_malloc(ggml_nbytes(dst), &dst_asf);
+ }
+
+ // do the computation
+ op(src0, src1, dst, src0_ddf, src1_ddf, dst_ddf, main_stream);
+ CUDA_CHECK(cudaGetLastError());
+
+ // copy dst to host if necessary
+ if (!dst_on_device) {
+ CUDA_CHECK(cudaMemcpyAsync(dst->data, dst_ddf, ggml_nbytes(dst), cudaMemcpyDeviceToHost, main_stream));
+ }
+
+ if (src0_asf > 0) {
+ ggml_cuda_pool_free(src0_ddf, src0_asf);
+ }
+ if (src1_asf > 0) {
+ ggml_cuda_pool_free(src1_ddf, src1_asf);
+ }
+ if (dst_asf > 0) {
+ ggml_cuda_pool_free(dst_ddf, dst_asf);
+ }
+
+ if (dst->backend == GGML_BACKEND_CPU) {
+ CUDA_CHECK(cudaDeviceSynchronize());
+ }
+}
+
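+// toggle peer-to-peer access between the devices: it is only enabled for small
+// batches (n_tokens <= GGML_CUDA_PEER_MAX_BATCH_SIZE) and, since the calls are
+// guarded by NDEBUG, only in release builds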
+static void ggml_cuda_set_peer_access(const int n_tokens) {
+ static bool peer_access_enabled = false;
+
+ const bool enable_peer_access = n_tokens <= GGML_CUDA_PEER_MAX_BATCH_SIZE;
+
+ if (peer_access_enabled == enable_peer_access) {
+ return;
+ }
+
+#ifdef NDEBUG
+ for (int id = 0; id < g_device_count; ++id) {
+ CUDA_CHECK(ggml_cuda_set_device(id));
+
+ for (int id_other = 0; id_other < g_device_count; ++id_other) {
+ if (id == id_other) {
+ continue;
+ }
+ if (id != g_main_device && id_other != g_main_device) {
+ continue;
+ }
+
+ int can_access_peer;
+ CUDA_CHECK(cudaDeviceCanAccessPeer(&can_access_peer, id, id_other));
+ if (can_access_peer) {
+ if (enable_peer_access) {
+ CUDA_CHECK(cudaDeviceEnablePeerAccess(id_other, 0));
+ } else {
+ CUDA_CHECK(cudaDeviceDisablePeerAccess(id_other));
+ }
+ }
+ }
+ }
+#endif // NDEBUG
+
+ peer_access_enabled = enable_peer_access;
}
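+// common driver for the matrix multiplication ops: splits the src0 rows across
+// the devices according to g_tensor_split, processes src1 in column chunks and,
+// if requested, quantizes src1 to q8_1 for the mul_mat_q / mul_mat_vec_q kernels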
-static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
- ggml_cuda_op_t op, bool src0_needs_f32, bool flatten_rows) {
+static void ggml_cuda_op_mul_mat(
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, ggml_cuda_op_mul_mat_t op,
+ const bool convert_src1_to_q8_1) {
+
const int64_t ne00 = src0->ne[0];
const int64_t ne01 = src0->ne[1];
const int64_t ne02 = src0->ne[2];
const int64_t ne03 = src0->ne[3];
const int64_t nrows0 = ggml_nrows(src0);
- const bool use_src1 = src1 != nullptr;
- const int64_t ne10 = use_src1 ? src1->ne[0] : 1;
- const int64_t ne11 = use_src1 ? src1->ne[1] : 1;
- const int64_t ne12 = use_src1 ? src1->ne[2] : 1;
- const int64_t ne13 = use_src1 ? src1->ne[3] : 1;
- const int64_t nrows1 = use_src1 ? ggml_nrows(src1) : 1;
+ const int64_t ne10 = src1->ne[0];
+ const int64_t ne11 = src1->ne[1];
+ const int64_t ne12 = src1->ne[2];
+ const int64_t ne13 = src1->ne[3];
+ const int64_t nrows1 = ggml_nrows(src1);
GGML_ASSERT(ne03 == ne13);
const int64_t ne0 = dst->ne[0];
const int64_t ne1 = dst->ne[1];
- const int nb2 = dst->nb[2];
- const int nb3 = dst->nb[3];
+ const int nb2 = dst->nb[2];
+ const int nb3 = dst->nb[3];
+
+ ggml_cuda_set_peer_access(ne11);
GGML_ASSERT(dst->backend != GGML_BACKEND_GPU_SPLIT);
- GGML_ASSERT(!use_src1 || src1->backend != GGML_BACKEND_GPU_SPLIT);
+ GGML_ASSERT(src1->backend != GGML_BACKEND_GPU_SPLIT);
- // strides for iteration over dims 3 and 2
- const int64_t num_iters_0 = ne02 >= ne12 ? ne02*ne03 : ne12*ne13;
- const int64_t num_iters = flatten_rows ? 1 : num_iters_0;
- const int64_t stride_mod = flatten_rows ? num_iters_0 : 1;
- const int64_t src0_stride = ne00 * ne01 * stride_mod;
- const int64_t src1_stride = ne10 * ne11 * stride_mod;
- const int64_t dst_stride = ne0 * ne1 * stride_mod;
+ GGML_ASSERT(ne12 >= ne02 && ne12 % ne02 == 0);
- const int64_t rows_per_iter = flatten_rows ? nrows0 : ne01;
- const int64_t i03_max = flatten_rows ? 1 : ne03;
- const int64_t i02_max = flatten_rows ? 1 : (ne02 >= ne12 ? ne02 : ne12);
- const int64_t i02_divisor = ne02 >= ne12 ? 1 : ne12 / ne02;
- GGML_ASSERT(!(flatten_rows && ne02 < ne12));
+ const int64_t i02_divisor = ne12 / ne02;
const size_t src0_ts = ggml_type_size(src0->type);
const size_t src0_bs = ggml_blck_size(src0->type);
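+    // type size and block size of the q8_1 format that src1 may be quantized to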
+ const size_t q8_1_ts = sizeof(block_q8_1);
+ const size_t q8_1_bs = QK8_1;
- struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
- struct ggml_tensor_extra_gpu * src1_extra = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
- struct ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
+ ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
+ ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
+ ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
const bool src0_on_device = src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT;
const bool src0_is_contiguous = ggml_is_contiguous(src0);
- const bool src0_is_f32 = src0->type == GGML_TYPE_F32;
- const bool src1_is_contiguous = use_src1 && ggml_is_contiguous(src1);
- const bool src1_stays_on_host = use_src1 && (
- dst->op == GGML_OP_SCALE || dst->op == GGML_OP_DIAG_MASK_INF || dst->op == GGML_OP_ROPE);
+ const bool src1_is_contiguous = ggml_is_contiguous(src1);
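+    // the src1 row length (ne10) is padded to a multiple of MATRIX_ROW_PADDING
+    // so that the quantized kernels never read past the end of a row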
+ const int64_t src1_padded_col_size = ne10 % MATRIX_ROW_PADDING == 0 ?
+ ne10 : ne10 - ne10 % MATRIX_ROW_PADDING + MATRIX_ROW_PADDING;
const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT;
+ GGML_ASSERT(!(split && ne02 > 1));
+ GGML_ASSERT(!(split && ne03 > 1));
GGML_ASSERT(!(split && ne02 < ne12));
- const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type);
-
// dd = data device
- char * src0_ddq[GGML_CUDA_MAX_DEVICES] = {nullptr}; // quantized
- float * src0_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr}; // float
- float * src1_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr};
- float * dst_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr};
-
- // asq = actual size quantized, asf = actual size float
- size_t src0_asq[GGML_CUDA_MAX_DEVICES] = {0};
- size_t src0_asf[GGML_CUDA_MAX_DEVICES] = {0};
- size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0};
- size_t dst_asf[GGML_CUDA_MAX_DEVICES] = {0};
+ char * src0_dd[GGML_CUDA_MAX_DEVICES] = {nullptr};
+ float * src1_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr}; // float
+ char * src1_ddq[GGML_CUDA_MAX_DEVICES] = {nullptr}; // q8_1
+ float * dst_dd[GGML_CUDA_MAX_DEVICES] = {nullptr};
- // if multiple devices are used they need to wait for the main device
- // here an event is recorded that signifies that the main device has finished calculating the input data
- if (split && g_device_count > 1) {
- CUDA_CHECK(cudaSetDevice(g_main_device));
- CUDA_CHECK(cudaEventRecord(src0_extra->events[g_main_device], g_cudaStreams_main[g_main_device]));
- }
+ // as = actual size
+ size_t src0_as[GGML_CUDA_MAX_DEVICES] = {0};
+ size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0};
+ size_t src1_asq[GGML_CUDA_MAX_DEVICES] = {0};
+ size_t dst_as[GGML_CUDA_MAX_DEVICES] = {0};
- for (int id = 0; id < g_device_count; ++id) {
- if (!split && id != g_main_device) {
- continue;
- }
+ int64_t row_low[GGML_CUDA_MAX_DEVICES];
+ int64_t row_high[GGML_CUDA_MAX_DEVICES];
- const bool src1_on_device = use_src1 && src1->backend == GGML_BACKEND_GPU && id == g_main_device;
- const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device;
+ for (int64_t id = 0; id < g_device_count; ++id) {
+ // by default, use all rows
+ row_low[id] = 0;
+ row_high[id] = ne01;
- int64_t row_low, row_high;
+ // for multi GPU, get the row boundaries from tensor split
+ // and round to mul_mat_q tile sizes
if (split) {
const int64_t rounding = get_row_rounding(src0->type);
- row_low = id == 0 ? 0 : nrows0*g_tensor_split[id];
- row_low -= row_low % rounding;
+ if (id != 0) {
+ row_low[id] = ne01*g_tensor_split[id];
+ row_low[id] -= row_low[id] % rounding;
+ }
- if (id == g_device_count - 1) {
- row_high = nrows0;
- } else {
- row_high = nrows0*g_tensor_split[id + 1];
- row_high -= row_high % rounding;
+ if (id != g_device_count - 1) {
+ row_high[id] = ne01*g_tensor_split[id + 1];
+ row_high[id] -= row_high[id] % rounding;
}
- } else {
- row_low = 0;
- row_high = nrows0*i02_divisor;
}
- if (row_low == row_high) {
+ }
+
+ for (int64_t id = 0; id < g_device_count; ++id) {
+ if ((!split && id != g_main_device) || row_low[id] == row_high[id]) {
continue;
}
- int64_t row_diff = row_high - row_low;
+ const bool src1_on_device = src1->backend == GGML_BACKEND_GPU && id == g_main_device;
+ const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device;
- cudaSetDevice(id);
- cudaStream_t cudaStream_main = g_cudaStreams_main[id];
-
- // wait for main GPU data if necessary
- if (split && id != g_main_device) {
- CUDA_CHECK(cudaStreamWaitEvent(cudaStream_main, src0_extra->events[g_main_device]));
- }
+ ggml_cuda_set_device(id);
+ const cudaStream_t stream = g_cudaStreams[id][0];
if (src0_on_device && src0_is_contiguous) {
- if (src0_is_f32) {
- src0_ddf[id] = (float *) src0_extra->data_device[id];
- } else {
- src0_ddq[id] = (char *) src0_extra->data_device[id];
- }
+ src0_dd[id] = (char *) src0_extra->data_device[id];
} else {
- if (src0_is_f32) {
- src0_ddf[id] = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_asf[id]);
- } else {
- src0_ddq[id] = (char *) ggml_cuda_pool_malloc(row_diff*ne00 * src0_ts/src0_bs, &src0_asq[id]);
- }
+            // for split tensors only this device's row slice needs to be allocated
+            const size_t size_src0_ddq = split ? (row_high[id]-row_low[id])*ne00 * src0_ts/src0_bs : ggml_nbytes(src0);
+            src0_dd[id] = (char *) ggml_cuda_pool_malloc_async(size_src0_ddq, &src0_as[id], id, stream);
}
- if (src0_needs_f32 && !src0_is_f32) {
- src0_ddf[id] = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_asf[id]);
+ if (src1_on_device && src1_is_contiguous) {
+ src1_ddf[id] = (float *) src1_extra->data_device[id];
+ } else {
+ src1_ddf[id] = (float *) ggml_cuda_pool_malloc_async(ggml_nbytes(src1), &src1_asf[id], id, stream);
}
- if (use_src1 && !src1_stays_on_host) {
+ if (convert_src1_to_q8_1) {
+            const size_t size_src1_ddq = nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs;
+            src1_ddq[id] = (char *) ggml_cuda_pool_malloc_async(size_src1_ddq, &src1_asq[id], id, stream);
+
if (src1_on_device && src1_is_contiguous) {
- src1_ddf[id] = (float *) src1_extra->data_device[id];
- } else {
- src1_ddf[id] = (float *) ggml_cuda_pool_malloc(num_iters*src1_stride * sizeof(float), &src1_asf[id]);
+                quantize_row_q8_1_cuda(src1_ddf[id], src1_ddq[id], ne10, nrows1, src1_padded_col_size, stream);
+                CUDA_CHECK(cudaGetLastError());
}
}
+
if (dst_on_device) {
- dst_ddf[id] = (float *) dst_extra->data_device[id];
+ dst_dd[id] = (float *) dst_extra->data_device[id];
} else {
- size_t size_dst_ddf = split ? row_diff*ne1 * sizeof(float) : num_iters*dst_stride * sizeof(float);
- dst_ddf[id] = (float *) ggml_cuda_pool_malloc(size_dst_ddf, &dst_asf[id]);
+ const size_t size_dst_ddf = split ? (row_high[id]-row_low[id])*ne1*sizeof(float) : ggml_nbytes(dst);
+ dst_dd[id] = (float *) ggml_cuda_pool_malloc_async(size_dst_ddf, &dst_as[id], id, stream);
}
+ }
- for (int64_t i03 = 0; i03 < i03_max; i03++) {
- const int64_t i13 = i03 % ne13;
- for (int64_t i02 = 0; i02 < i02_max; i02++) {
- const int64_t i12 = i02 % ne12;
+ // if multiple devices are used they need to wait for the main device
+ // here an event is recorded that signals that the main device has finished calculating the input data
+ if (split && g_device_count > 1) {
+ CUDA_CHECK(ggml_cuda_set_device(g_main_device));
+ CUDA_CHECK(cudaEventRecord(src0_extra->events[g_main_device][0], g_cudaStreams[g_main_device][0]));
+ }
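+    // process src1 in chunks of at most MUL_MAT_SRC1_COL_STRIDE columns when
+    // splitting across devices; the chunks rotate over MAX_STREAMS streams per
+    // device so that copies and kernels of different chunks can overlap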
- const int64_t i0 = i03*i02_max + i02;
+ const int64_t src1_col_stride = split && g_device_count > 1 ? MUL_MAT_SRC1_COL_STRIDE : ne11;
+ for (int64_t src1_col_0 = 0; src1_col_0 < ne11; src1_col_0 += src1_col_stride) {
+ const int64_t is = split ? (src1_col_0/src1_col_stride) % MAX_STREAMS : 0;
+ const int64_t src1_ncols = src1_col_0 + src1_col_stride > ne11 ? ne11 - src1_col_0 : src1_col_stride;
- // i0 values that contain the lower/upper rows for a split tensor when using multiple GPUs
- const int64_t i0_offset_low = row_low/rows_per_iter;
- const int64_t i0_offset_high = row_high/rows_per_iter;
+ for (int64_t id = 0; id < g_device_count; ++id) {
+ if ((!split && id != g_main_device) || row_low[id] == row_high[id]) {
+ continue;
+ }
- int64_t i01_low = 0;
- int64_t i01_high = rows_per_iter;
- if (split) {
- if (i0 < i0_offset_low || i0 > i0_offset_high) {
- continue;
- }
- if (i0 == i0_offset_low) {
- i01_low = row_low % rows_per_iter;
- }
- if (i0 == i0_offset_high) {
- i01_high = row_high % rows_per_iter;
- }
- }
+ const bool src1_on_device = src1->backend == GGML_BACKEND_GPU && id == g_main_device;
+ const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device;
+ const int64_t row_diff = row_high[id] - row_low[id];
- // There is possibly a bug in the Windows nvcc compiler regarding instruction reordering or optimizing out local variables.
- // Removing the first assert or changing the order of the arguments causes the second assert to fail.
- // Removing both asserts results in i01_high becoming 0 which in turn results in garbage output.
- // The root cause seems to be a problem with i0_offset_high becoming 0 when it should always be >0 (for single GPU).
- GGML_ASSERT(i01_low == 0 || g_device_count > 1);
- GGML_ASSERT(i01_high == rows_per_iter || g_device_count > 1);
+ ggml_cuda_set_device(id);
+ const cudaStream_t stream = g_cudaStreams[id][is];
- const int64_t i01_diff = i01_high - i01_low;
- if (i01_diff == 0) {
- continue;
- }
- const int64_t i11 = i13*ne12 + i12;
+ // wait for main GPU data if necessary
+ if (split && (id != g_main_device || is != 0)) {
+ CUDA_CHECK(cudaStreamWaitEvent(stream, src0_extra->events[g_main_device][0], 0));
+ }
+
+ for (int64_t i0 = 0; i0 < ne13*ne12; ++i0) {
+ const int64_t i03 = i0 / ne12;
+ const int64_t i02 = i0 % ne12;
+
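+                // offset of this chunk's q8_1 data within the per-device src1 buffer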
+ const size_t src1_ddq_i_offset = (i0*ne11 + src1_col_0) * src1_padded_col_size*q8_1_ts/q8_1_bs;
-            // for split tensors the data begins at i0 == i0_offset_low
+            // for split tensors the per-device data begins at row_low[id]
- char * src0_ddq_i = src0_ddq[id] + (i0/i02_divisor - i0_offset_low)*src0_stride*src0_ts/src0_bs;
- float * src0_ddf_i = src0_ddf[id] + (i0/i02_divisor - i0_offset_low)*src0_stride;
- float * src1_ddf_i = src1_ddf[id] + i11*src1_stride;
- float * dst_ddf_i = dst_ddf[id] + (i0 - i0_offset_low)*dst_stride;
-
- // for split tensors the data pointer needs to be rounded down
- // to the bin edge for i03, i02 bins beyond the first
- if (i0 - i0_offset_low > 0) {
- GGML_ASSERT(!flatten_rows);
- src0_ddq_i -= (row_low % ne01)*ne00 * src0_ts/src0_bs;
- src0_ddf_i -= (row_low % ne01)*ne00;
- dst_ddf_i -= (row_low % ne0)*ne1;
- }
+ char * src0_dd_i = src0_dd[id] + (i0/i02_divisor) * ne01*ne00*src0_ts/src0_bs;
+ float * src1_ddf_i = src1_ddf[id] + (i0*ne11 + src1_col_0) * ne10;
+ char * src1_ddq_i = src1_ddq[id] + src1_ddq_i_offset;
+ float * dst_dd_i = dst_dd[id] + (i0*ne1 + src1_col_0) * (dst_on_device ? ne0 : row_diff);
// the main device memory buffer can be on VRAM scratch, with space for all partial results
-            // in that case an offset on dst_ddf_i is needed
+            // in that case an offset on dst_dd_i is needed
if (dst->backend == GGML_BACKEND_GPU && id == g_main_device) {
- dst_ddf_i += i01_low; // offset is 0 if no tensor split
+ dst_dd_i += row_low[id]; // offset is 0 if no tensor split
}
// copy src0, src1 to device if necessary
- if (use_src1 && !src1_stays_on_host) {
- if (src1->backend == GGML_BACKEND_CPU) {
- GGML_ASSERT(!flatten_rows || nrows0 == ggml_nrows(src1));
- int64_t nrows1 = flatten_rows ? nrows0 : ne11;
- CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf_i, src1, i03, i02, 0, nrows1, cudaStream_main));
- } else if (src1->backend == GGML_BACKEND_GPU && src1_is_contiguous) {
- if (id != g_main_device) {
- GGML_ASSERT(!flatten_rows);
+ if (src1->backend == GGML_BACKEND_GPU && src1_is_contiguous) {
+ if (id != g_main_device) {
+ if (convert_src1_to_q8_1) {
+ char * src1_ddq_i_source = src1_ddq[g_main_device] + src1_ddq_i_offset;
+ CUDA_CHECK(cudaMemcpyAsync(src1_ddq_i, src1_ddq_i_source, src1_ncols*src1_padded_col_size*q8_1_ts/q8_1_bs,
+ cudaMemcpyDeviceToDevice, stream));
+ } else {
float * src1_ddf_i_source = (float *) src1_extra->data_device[g_main_device];
- src1_ddf_i_source += i11*src1_stride;
- CUDA_CHECK(cudaMemcpyAsync(src1_ddf_i, src1_ddf_i_source, src1_stride*sizeof(float),
- cudaMemcpyDeviceToDevice, cudaStream_main));
+ src1_ddf_i_source += (i0*ne11 + src1_col_0) * ne10;
+ CUDA_CHECK(cudaMemcpyAsync(src1_ddf_i, src1_ddf_i_source, src1_ncols*ne10*sizeof(float),
+ cudaMemcpyDeviceToDevice, stream));
}
- } else if (src1_on_device && !src1_is_contiguous) {
- GGML_ASSERT(!split);
- CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src1_ddf_i, src1, i03, i02, 0, ne11, cudaStream_main));
- } else {
- GGML_ASSERT(false);
}
+ } else if (src1->backend == GGML_BACKEND_CPU || (src1_on_device && !src1_is_contiguous)) {
+ CUDA_CHECK(ggml_cuda_cpy_tensor_2d(
+ src1_ddf_i, src1, i03, i02, src1_col_0, src1_col_0+src1_ncols, stream));
+ } else {
+ GGML_ASSERT(false);
}
- if ((!src0_on_device || !src0_is_contiguous) && i02 % i02_divisor == 0) {
- if (src0_is_f32) {
- CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_ddf_i, src0, i03, i02/i02_divisor, i01_low, i01_high, cudaStream_main));
- } else {
- CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_ddq_i, src0, i03, i02/i02_divisor, i01_low, i01_high, cudaStream_main));
- }
+ if (convert_src1_to_q8_1 && (src1->backend == GGML_BACKEND_CPU || !src1_is_contiguous)) {
+ quantize_row_q8_1_cuda(src1_ddf_i, src1_ddq_i, ne10, src1_ncols, src1_padded_col_size, stream);
+ CUDA_CHECK(cudaGetLastError());
}
- // convert src0 to f32 if it is necessary for the ggml_cuda_op
- if (src0_needs_f32 && !src0_is_f32) {
- to_fp32_cuda(src0_ddq_i, src0_ddf_i, i01_diff*ne00, cudaStream_main);
- CUDA_CHECK(cudaGetLastError());
+ if (src1_col_0 == 0 && (!src0_on_device || !src0_is_contiguous) && i02 % i02_divisor == 0) {
+ CUDA_CHECK(ggml_cuda_cpy_tensor_2d(src0_dd_i, src0, i03, i02/i02_divisor, row_low[id], row_high[id], stream));
}
// do the computation
- op(src0, src1, dst, src0_ddq_i, src0_ddf_i, src1_ddf_i, dst_ddf_i, i02, i01_low, i01_high, i11, cudaStream_main);
+ op(src0, src1, dst, src0_dd_i, src1_ddf_i, src1_ddq_i, dst_dd_i,
+ row_low[id], row_high[id], src1_ncols, src1_padded_col_size, stream);
CUDA_CHECK(cudaGetLastError());
// copy dst to host or other device if necessary
// The outputs of matrix matrix multiplications can therefore NOT simply be concatenated for >1 GPU.
// Instead they need to be copied to the correct slice in ne0 = dst row index.
// If dst is a vector with ne0 == 1 then you don't have to do this but it still produces correct results.
- float * dhf_dst_i = (float *) ((char *) dst_off_device + i01_low*sizeof(float) + i02*nb2 + i03*nb3);
- CUDA_CHECK(cudaMemcpy2DAsync(dhf_dst_i, ne0*sizeof(float), dst_ddf_i, i01_diff*sizeof(float),
- i01_diff*sizeof(float), ne1, kind, cudaStream_main));
+ float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3);
+ GGML_ASSERT(dst->nb[1] == ne0*sizeof(float));
+ dhf_dst_i += src1_col_0*ne0 + row_low[id];
+ CUDA_CHECK(cudaMemcpy2DAsync(dhf_dst_i, ne0*sizeof(float), dst_dd_i, row_diff*sizeof(float),
+ row_diff*sizeof(float), src1_ncols, kind, stream));
} else {
float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3);
- CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_ddf_i, dst_stride*sizeof(float), kind, cudaStream_main));
+ GGML_ASSERT(dst->nb[1] == ne0*sizeof(float));
+ dhf_dst_i += src1_col_0*ne0;
+ CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_dd_i, src1_ncols*ne0*sizeof(float), kind, stream));
}
}
- // signify to main device that other device is done
- if (split && g_device_count > 1 && id != g_main_device) {
- CUDA_CHECK(cudaEventRecord(src0_extra->events[id], cudaStream_main));
- }
- }
- }
- }
-
- // wait until each device is finished, then free their buffers
- for (int id = 0; id < g_device_count; ++id) {
- if (src0_asq[id] == 0 && src0_asf[id] == 0 && src1_asf[id] == 0 && dst_asf[id] == 0) {
- continue;
- }
-
- CUDA_CHECK(cudaSetDevice(id));
-
- if (src0_asq[id] > 0) {
- ggml_cuda_pool_free(src0_ddq[id], src0_asq[id]);
- }
- if (src0_asf[id] > 0) {
- ggml_cuda_pool_free(src0_ddf[id], src0_asf[id]);
- }
- if (src1_asf[id] > 0) {
- ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]);
- }
- if (dst_asf[id] > 0) {
- ggml_cuda_pool_free(dst_ddf[id], dst_asf[id]);
+            // record an event for the main device to wait on until the other device is done
+ if (split && (id != g_main_device || is != 0)) {
+ CUDA_CHECK(cudaEventRecord(src0_extra->events[id][is], stream));
+ }
+ }
}
}
// main device waits for all other devices to be finished
if (split && g_device_count > 1) {
- CUDA_CHECK(cudaSetDevice(g_main_device));
- for (int id = 0; id < g_device_count; ++id) {
- if (id != g_main_device && src0_extra->events[id]) {
- CUDA_CHECK(cudaStreamWaitEvent(g_cudaStreams_main[g_main_device], src0_extra->events[id]));
+ int64_t is_max = (ne11 + MUL_MAT_SRC1_COL_STRIDE - 1) / MUL_MAT_SRC1_COL_STRIDE;
+ is_max = is_max <= MAX_STREAMS ? is_max : MAX_STREAMS;
+
+ CUDA_CHECK(ggml_cuda_set_device(g_main_device));
+ for (int64_t id = 0; id < g_device_count; ++id) {
+ for (int64_t is = 0; is < is_max; ++is) {
+ CUDA_CHECK(cudaStreamWaitEvent(g_cudaStreams[g_main_device][0], src0_extra->events[id][is], 0));
}
}
}
if (dst->backend == GGML_BACKEND_CPU) {
- CUDA_CHECK(cudaSetDevice(g_main_device));
+ CUDA_CHECK(ggml_cuda_set_device(g_main_device));
CUDA_CHECK(cudaDeviceSynchronize());
}
+
+ for (int64_t id = 0; id < g_device_count; ++id) {
+ if (src0_as[id] > 0) {
+ ggml_cuda_pool_free_async(src0_dd[id], src0_as[id], id, g_cudaStreams[id][0]);
+ }
+ if (src1_asf[id] > 0) {
+ ggml_cuda_pool_free_async(src1_ddf[id], src1_asf[id], id, g_cudaStreams[id][0]);
+ }
+ if (src1_asq[id] > 0) {
+ ggml_cuda_pool_free_async(src1_ddq[id], src1_asq[id], id, g_cudaStreams[id][0]);
+ }
+ if (dst_as[id] > 0) {
+ ggml_cuda_pool_free_async(dst_dd[id], dst_as[id], id, g_cudaStreams[id][0]);
+ }
+ }
+}
+
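+// thin wrappers: ggml_cuda_op_flatten runs a single op over the whole tensor on the main device,
+// without the row splitting that ggml_cuda_op_mul_mat performs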
+static void ggml_cuda_repeat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_repeat);
+}
+
+static void ggml_cuda_get_rows(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_get_rows);
}
-void ggml_cuda_add(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- // ggml_cuda_add permits f16 dst even though this could in theory cause problems with the pointer arithmetic in ggml_cuda_op.
- // Due to flatten_rows == true this does in practice not make a difference however.
- // Better solution would be nice but right now that would require disproportionate changes.
- GGML_ASSERT(
- (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16) &&
- src1->type == GGML_TYPE_F32 &&
- (dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16));
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_add, false, true);
+static void ggml_cuda_add(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_add);
}
-void ggml_cuda_mul(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul, true, false); // TODO ggml_cuda_op needs modification for flatten
+static void ggml_cuda_mul(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_mul);
}
-void ggml_cuda_gelu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_gelu, true, true);
+static void ggml_cuda_gelu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_gelu);
}
-void ggml_cuda_silu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_silu, true, true);
+static void ggml_cuda_silu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_silu);
}
-void ggml_cuda_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_norm, true, true);
+static void ggml_cuda_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_norm);
}
-void ggml_cuda_rms_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rms_norm, true, true);
+static void ggml_cuda_rms_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_rms_norm);
}
bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
const int64_t ne1 = dst->ne[1];
// TODO: find the optimal values for these
- if ((src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) &&
- src1->type == GGML_TYPE_F32 &&
- dst->type == GGML_TYPE_F32 &&
- (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
- return true;
- }
-
- return false;
+ return (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) &&
+ src1->type == GGML_TYPE_F32 &&
+ dst->type == GGML_TYPE_F32 &&
+ (ne0 >= 32 && ne1 >= 32 && ne10 >= 32);
}
-void ggml_cuda_mul_mat_vec_p021(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
+static void ggml_cuda_mul_mat_vec_p021(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT);
GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // 0213 permutation
const int64_t ne12 = src1->ne[2];
- CUDA_CHECK(cudaSetDevice(g_main_device));
- cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
+ CUDA_CHECK(ggml_cuda_set_device(g_main_device));
+ cudaStream_t main_stream = g_cudaStreams[g_main_device][0];
- struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
+ ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
void * src0_ddq = src0_extra->data_device[g_main_device];
- struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
+ ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
float * src1_ddf = (float *) src1_extra->data_device[g_main_device];
- struct ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
+ ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
float * dst_ddf = (float *) dst_extra->data_device[g_main_device];
- ggml_mul_mat_p021_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, ne12, cudaStream_main);
+ ggml_mul_mat_p021_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, ne12, main_stream);
}
-void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
- GGML_ASSERT(!ggml_is_contiguous(src0) && ggml_is_contiguous(src1));
+static void ggml_cuda_mul_mat_vec_nc(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst){
+ GGML_ASSERT(!ggml_is_transposed(src0));
+ GGML_ASSERT(!ggml_is_transposed(src1));
GGML_ASSERT(!ggml_is_permuted(src0));
GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT);
GGML_ASSERT(src0->type == GGML_TYPE_F16);
const int64_t ne01 = src0->ne[1];
const int64_t ne02 = src0->ne[2];
+ const int64_t nb01 = src0->nb[1];
+ const int64_t nb02 = src0->nb[2];
+
const int64_t ne12 = src1->ne[2];
+ CUDA_CHECK(ggml_cuda_set_device(g_main_device));
+ cudaStream_t main_stream = g_cudaStreams[g_main_device][0];
+
+ ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
+ void * src0_ddq = src0_extra->data_device[g_main_device];
+
+ ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
+ float * src1_ddf = (float *) src1_extra->data_device[g_main_device];
+
+ ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
+ float * dst_ddf = (float *) dst_extra->data_device[g_main_device];
+
+ const int64_t row_stride_x = nb01 / sizeof(half);
+ const int64_t channel_stride_x = nb02 / sizeof(half);
+
+ ggml_mul_mat_vec_nc_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, row_stride_x, ne02, ne12, channel_stride_x, main_stream);
+}
+
+__global__ void k_compute_batched_ptrs(
+ const half * src0_as_f16, const half * src1_as_f16, half * dst_f16,
+ const void ** ptrs_src, void ** ptrs_dst,
+ int ne12, int ne13,
+ int ne23,
+ int nb02, int nb03,
+ int nb12, int nb13,
+ int nb2, int nb3,
+ int r2, int r3) {
+ int i13 = blockIdx.x * blockDim.x + threadIdx.x;
+ int i12 = blockIdx.y * blockDim.y + threadIdx.y;
+
+ if (i13 >= ne13 || i12 >= ne12) {
+ return;
+ }
+
+ int i03 = i13 / r3;
+ int i02 = i12 / r2;
+
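+ // the /2 on the src1/dst strides below accounts for nb12/nb13/nb2/nb3 being byte strides of
+ // the original f32 tensors, while src1_as_f16 and dst_f16 hold f16 copies with the same layout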
+ ptrs_src[0*ne23 + i12 + i13*ne12] = (const char *) src0_as_f16 + i02*nb02 + i03*nb03;
+ ptrs_src[1*ne23 + i12 + i13*ne12] = (const char *) src1_as_f16 + i12*nb12/2 + i13*nb13/2;
+ ptrs_dst[0*ne23 + i12 + i13*ne12] = ( char *) dst_f16 + i12* nb2/2 + i13* nb3/2;
+}
+
+static void ggml_cuda_mul_mat_mat_batched_cublas(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ GGML_ASSERT(!ggml_is_transposed(src0));
+ GGML_ASSERT(!ggml_is_transposed(src1));
+
+ GGML_ASSERT(src0->backend != GGML_BACKEND_GPU_SPLIT);
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+
+ const int64_t ne00 = src0->ne[0]; GGML_UNUSED(ne00);
+ const int64_t ne01 = src0->ne[1];
+ const int64_t ne02 = src0->ne[2];
+ const int64_t ne03 = src0->ne[3];
+
const int64_t nb01 = src0->nb[1];
- const int64_t nb02 = src0->nb[2];
+ const int64_t nb02 = src0->nb[2]; GGML_UNUSED(nb02);
+ const int64_t nb03 = src0->nb[3]; GGML_UNUSED(nb03);
+
+ const int64_t ne10 = src1->ne[0];
+ const int64_t ne11 = src1->ne[1];
+ const int64_t ne12 = src1->ne[2];
+ const int64_t ne13 = src1->ne[3];
- CUDA_CHECK(cudaSetDevice(g_main_device));
- cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
+ const int64_t nb11 = src1->nb[1];
+ const int64_t nb12 = src1->nb[2]; GGML_UNUSED(nb12);
+ const int64_t nb13 = src1->nb[3]; GGML_UNUSED(nb13);
+
+ const int64_t ne1 = ggml_nelements(src1);
+ const int64_t ne = ggml_nelements(dst);
+
+ CUDA_CHECK(ggml_cuda_set_device(g_main_device));
+ cudaStream_t main_stream = g_cudaStreams[g_main_device][0];
+
+ int id;
+ CUDA_CHECK(cudaGetDevice(&id));
+ CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], main_stream));
- struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
+ ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
void * src0_ddq = src0_extra->data_device[g_main_device];
+ half * src0_as_f16 = (half *) src0_ddq;
- struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
+ ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
float * src1_ddf = (float *) src1_extra->data_device[g_main_device];
- struct ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
+ ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
float * dst_ddf = (float *) dst_extra->data_device[g_main_device];
- const int row_stride_x = nb01 / sizeof(half);
- const int channel_stride_x = nb02 / sizeof(half);
+ // convert src1 to fp16
+ const to_fp16_cuda_t to_fp16_cuda = ggml_get_to_fp16_cuda(src1->type);
+ GGML_ASSERT(to_fp16_cuda != nullptr);
+
+ size_t src1_as = 0;
+ half * src1_as_f16 = (half *) ggml_cuda_pool_malloc_async(ne1 * sizeof(half), &src1_as, id, main_stream);
+ to_fp16_cuda(src1_ddf, src1_as_f16, ne1, main_stream);
+
+ size_t dst_as = 0;
+ half * dst_f16 = (half *) ggml_cuda_pool_malloc_async(ne * sizeof(half), &dst_as, id, main_stream);
+
+ GGML_ASSERT(ne12 % ne02 == 0);
+ GGML_ASSERT(ne13 % ne03 == 0);
+
+ // broadcast factors
+ const int64_t r2 = ne12/ne02;
+ const int64_t r3 = ne13/ne03;
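+ // e.g. ne02 = 2 src0 channels broadcast over ne12 = 6 src1 channels gives r2 = 3:
+ // src1 channels 0,1,2 read src0 channel 0 and channels 3,4,5 read src0 channel 1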
+
+ const half alpha_f16 = 1.0f;
+ const half beta_f16 = 0.0f;
+
+#if 0
+ // use cublasGemmEx
+ {
+ for (int i13 = 0; i13 < ne13; ++i13) {
+ for (int i12 = 0; i12 < ne12; ++i12) {
+ int i03 = i13 / r3;
+ int i02 = i12 / r2;
+
+ CUBLAS_CHECK(
+ cublasGemmEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
+ ne01, ne11, ne10,
+ &alpha_f16, (const char *) src0_as_f16 + i02*src0->nb[2] + i03*src0->nb[3] , CUDA_R_16F, nb01/sizeof(half),
+ (const char *) src1_as_f16 + i12*src1->nb[2]/2 + i13*src1->nb[3]/2, CUDA_R_16F, nb11/sizeof(float),
+ &beta_f16, ( char *) dst_f16 + i12* dst->nb[2]/2 + i13* dst->nb[3]/2, CUDA_R_16F, ne01,
+ CUBLAS_COMPUTE_16F,
+ CUBLAS_GEMM_DEFAULT_TENSOR_OP));
+ }
+ }
+ }
+#else
+ if (r2 == 1 && r3 == 1 && src0->nb[2]*src0->ne[2] == src0->nb[3] && src1->nb[2]*src1->ne[2] == src1->nb[3]) {
+ // there is no broadcast and src0, src1 are contiguous across dims 2, 3
+ // use cublasGemmStridedBatchedEx
+ CUBLAS_CHECK(
+ cublasGemmStridedBatchedEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
+ ne01, ne11, ne10,
+ &alpha_f16, (const char *) src0_as_f16, CUDA_R_16F, nb01/sizeof(half), src0->nb[2]/sizeof(half), // strideA
+ (const char *) src1_as_f16, CUDA_R_16F, nb11/sizeof(float), src1->nb[2]/sizeof(float), // strideB
+ &beta_f16, ( char *) dst_f16, CUDA_R_16F, ne01, dst->nb[2]/sizeof(float), // strideC
+ ne12*ne13,
+ CUBLAS_COMPUTE_16F,
+ CUBLAS_GEMM_DEFAULT_TENSOR_OP));
+ } else {
+ // use cublasGemmBatchedEx
+ const int ne23 = ne12*ne13;
+
+ const void ** ptrs_src = nullptr;
+ void ** ptrs_dst = nullptr;
+
+ size_t ptrs_src_s = 0;
+ size_t ptrs_dst_s = 0;
+
+ ptrs_src = (const void **) ggml_cuda_pool_malloc_async(2*ne23*sizeof(void *), &ptrs_src_s, id, main_stream);
+ ptrs_dst = ( void **) ggml_cuda_pool_malloc_async(1*ne23*sizeof(void *), &ptrs_dst_s, id, main_stream);
+
+ dim3 block_dims(ne13, ne12);
+ k_compute_batched_ptrs<<<1, block_dims, 0, main_stream>>>(
+ src0_as_f16, src1_as_f16, dst_f16,
+ ptrs_src, ptrs_dst,
+ ne12, ne13,
+ ne23,
+ nb02, nb03,
+ nb12, nb13,
+ dst->nb[2], dst->nb[3],
+ r2, r3);
+ CUDA_CHECK(cudaGetLastError());
+ CUBLAS_CHECK(
+ cublasGemmBatchedEx(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
+ ne01, ne11, ne10,
+ &alpha_f16, (const void **) (ptrs_src + 0*ne23), CUDA_R_16F, nb01/sizeof(half),
+ (const void **) (ptrs_src + 1*ne23), CUDA_R_16F, nb11/sizeof(float),
+ &beta_f16, ( void **) (ptrs_dst + 0*ne23), CUDA_R_16F, ne01,
+ ne23,
+ CUBLAS_COMPUTE_16F,
+ CUBLAS_GEMM_DEFAULT_TENSOR_OP));
+
+ if (ptrs_src_s != 0) {
+ ggml_cuda_pool_free_async(ptrs_src, ptrs_src_s, id, main_stream);
+ }
+ if (ptrs_dst_s != 0) {
+ ggml_cuda_pool_free_async(ptrs_dst, ptrs_dst_s, id, main_stream);
+ }
+ }
+#endif
- ggml_mul_mat_vec_nc_f16_f32_cuda(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, row_stride_x, ne02, ne12, channel_stride_x, cudaStream_main);
+ const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
+ to_fp32_cuda(dst_f16, dst_ddf, ne, main_stream);
+ if (src1_as != 0) {
+ ggml_cuda_pool_free_async(src1_as_f16, src1_as, id, main_stream);
+ }
+ if (dst_as != 0) {
+ ggml_cuda_pool_free_async(dst_f16, dst_as, id, main_stream);
+ }
}
-void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- bool all_on_device = (src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT) &&
- src1->backend == GGML_BACKEND_GPU && dst->backend == GGML_BACKEND_GPU;
+static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ const bool all_on_device =
+ (src0->backend == GGML_BACKEND_GPU) &&
+ (src1->backend == GGML_BACKEND_GPU) &&
+ ( dst->backend == GGML_BACKEND_GPU);
+
+ int64_t min_compute_capability = INT_MAX;
+ for (int64_t id = 0; id < g_device_count; ++id) {
+ if (min_compute_capability > g_compute_capabilities[id] && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
+ min_compute_capability = g_compute_capabilities[id];
+ }
+ }
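+ // note: a device only constrains min_compute_capability if its slice of the row split is
+ // non-empty, i.e. its start fraction is below the next device's start fraction (or below 1.0f)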
+
+#ifdef CUDA_USE_TENSOR_CORES
+ const bool use_tensor_cores = true;
+#else
+ const bool use_tensor_cores = false;
+#endif
+
+ // debug helpers
+ //printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]);
+ //printf(" %8d %8d %8d %8d\n", src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3]);
+ //printf("src1: %8d %8d %8d %8d\n", src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3]);
+ //printf(" %8d %8d %8d %8d\n", src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3]);
+ //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name);
+ //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name);
- if (all_on_device && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
+ if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
+ // KQ single-batch
ggml_cuda_mul_mat_vec_p021(src0, src1, dst);
- } else if (all_on_device && !ggml_is_contiguous(src0) && ggml_is_contiguous(src1) && src1->ne[1] == 1) {
+ } else if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
+ // KQV single-batch
ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
- }else if (src0->type == GGML_TYPE_F32) {
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
+ } else if (all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
+ // KQ + KQV multi-batch
+ ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst);
+ } else if (src0->type == GGML_TYPE_F32) {
+ ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, false);
} else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0) {
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_vec, false, false);
+#ifdef GGML_CUDA_FORCE_DMMV
+ const bool use_mul_mat_vec_q = false;
+#else
+ const bool use_mul_mat_vec_q = min_compute_capability >= MIN_CC_DP4A && ggml_is_quantized(src0->type);
+#endif // GGML_CUDA_FORCE_DMMV
+
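+ // mul_mat_vec_q works directly on the quantized data and needs the dp4a instruction,
+ // hence the MIN_CC_DP4A check; otherwise dequantize_mul_mat_vec is the fallback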
+ if (use_mul_mat_vec_q) {
+ ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_vec_q, true);
+ } else {
+ ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, false);
+ }
} else {
- int min_compute_capability = INT_MAX;
- for (int id = 0; id < g_device_count; ++id) {
- if (min_compute_capability > g_compute_capabilities[id]
- && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
- min_compute_capability = g_compute_capabilities[id];
- }
+ bool use_mul_mat_q = min_compute_capability >= MIN_CC_DP4A && ggml_is_quantized(src0->type);
+
+ // when tensor cores are available, use them for large batch size
+ // ref: https://github.com/ggerganov/llama.cpp/pull/3776
+ if (use_tensor_cores && min_compute_capability >= CC_VOLTA && src1->ne[1] > MMQ_MAX_BATCH_SIZE) {
+ use_mul_mat_q = false;
}
- if (g_mul_mat_q && ggml_is_quantized(src0->type) && min_compute_capability >= MIN_CC_DP4A) {
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_q, false, false);
+ if (use_mul_mat_q) {
+ ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_q, true);
} else {
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
+ ggml_cuda_op_mul_mat(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, false);
}
}
} else {
}
}
-void ggml_cuda_scale(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_scale, true, true);
+static void ggml_cuda_scale(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_scale);
+}
+
+static void ggml_cuda_clamp(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_clamp);
}
-void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+static void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
const int64_t ne = ggml_nelements(src0);
GGML_ASSERT(ne == ggml_nelements(src1));
const int64_t nb11 = src1->nb[1];
const int64_t nb12 = src1->nb[2];
- CUDA_CHECK(cudaSetDevice(g_main_device));
- cudaStream_t cudaStream_main = g_cudaStreams_main[g_main_device];
+ CUDA_CHECK(ggml_cuda_set_device(g_main_device));
+ cudaStream_t main_stream = g_cudaStreams[g_main_device][0];
- const struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
- const struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
+ const ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
+ const ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
char * src1_ddc = (char *) src1_extra->data_device[g_main_device];
if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) {
ggml_cpy_f32_f32_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
- ne10, ne11, nb10, nb11, nb12, cudaStream_main);
+ ne10, ne11, nb10, nb11, nb12, main_stream);
} else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) {
ggml_cpy_f32_f16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
- ne10, ne11, nb10, nb11, nb12, cudaStream_main);
+ ne10, ne11, nb10, nb11, nb12, main_stream);
} else {
+ fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__,
+ ggml_type_name(src0->type), ggml_type_name(src1->type));
GGML_ASSERT(false);
}
(void) dst;
}
-void ggml_cuda_dup(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+static void ggml_cuda_dup(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
ggml_cuda_cpy(src0, dst, nullptr);
(void) src1;
}
-void ggml_cuda_diag_mask_inf(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_diag_mask_inf, true, true);
+static void ggml_cuda_diag_mask_inf(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_diag_mask_inf);
}
-void ggml_cuda_soft_max(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_soft_max, true, true);
+static void ggml_cuda_soft_max(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_soft_max);
}
-void ggml_cuda_rope(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
+static void ggml_cuda_rope(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
GGML_ASSERT(ggml_is_contiguous(src0)); // TODO: this restriction is temporary until non-cont support is implemented
-
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rope, true, true);
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_rope);
}
-void ggml_cuda_alibi(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
- GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
- ggml_cuda_op(src0, src1, dst, ggml_cuda_op_alibi, true, true);
+static void ggml_cuda_alibi(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_alibi);
}
-void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+static void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
(void) src0;
(void) src1;
(void) dst;
}
void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
- int nrows = ggml_nrows(tensor);
+ const int64_t nrows = ggml_nrows(tensor);
const int64_t ne0 = tensor->ne[0];
const size_t nb1 = tensor->nb[1];
- ggml_backend backend = tensor->backend;
- struct ggml_tensor_extra_gpu * extra = new struct ggml_tensor_extra_gpu;
+ ggml_backend_type backend = tensor->backend;
+ ggml_tensor_extra_gpu * extra = new struct ggml_tensor_extra_gpu;
memset(extra, 0, sizeof(*extra));
- for (int id = 0; id < g_device_count; ++id) {
+ for (int64_t id = 0; id < g_device_count; ++id) {
if (backend == GGML_BACKEND_GPU && id != g_main_device) {
continue;
}
- cudaSetDevice(id);
+ ggml_cuda_set_device(id);
- int row_low, row_high;
+ int64_t row_low, row_high;
if (backend == GGML_BACKEND_GPU) {
row_low = 0;
row_high = nrows;
CUDA_CHECK(cudaMemset(buf + original_size, 0, size - original_size));
}
-
CUDA_CHECK(cudaMemcpy(buf, buf_host, original_size, cudaMemcpyHostToDevice));
extra->data_device[id] = buf;
if (backend == GGML_BACKEND_GPU_SPLIT) {
- CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id], cudaEventDisableTiming));
+ for (int64_t is = 0; is < MAX_STREAMS; ++is) {
+ CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id][is], cudaEventDisableTiming));
+ }
}
}
ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
- for (int id = 0; id < g_device_count; ++id) {
+ for (int64_t id = 0; id < g_device_count; ++id) {
if (extra->data_device[id] != nullptr) {
- CUDA_CHECK(cudaSetDevice(id));
+ CUDA_CHECK(ggml_cuda_set_device(id));
CUDA_CHECK(cudaFree(extra->data_device[id]));
}
- if (extra->events[id] != nullptr) {
- CUDA_CHECK(cudaSetDevice(id));
- CUDA_CHECK(cudaEventDestroy(extra->events[id]));
+ for (int64_t is = 0; is < MAX_STREAMS; ++is) {
+ if (extra->events[id][is] != nullptr) {
+ CUDA_CHECK(ggml_cuda_set_device(id));
+ CUDA_CHECK(cudaEventDestroy(extra->events[id][is]));
+ }
}
}
delete extra;
}
-static struct ggml_tensor_extra_gpu * g_temp_tensor_extras = nullptr;
+static ggml_tensor_extra_gpu * g_temp_tensor_extras = nullptr;
static size_t g_temp_tensor_extra_index = 0;
-static struct ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() {
+static ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() {
if (g_temp_tensor_extras == nullptr) {
- g_temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_MAX_NODES];
+ g_temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_DEFAULT_GRAPH_SIZE];
}
size_t alloc_index = g_temp_tensor_extra_index;
- g_temp_tensor_extra_index = (g_temp_tensor_extra_index + 1) % GGML_MAX_NODES;
- struct ggml_tensor_extra_gpu * extra = &g_temp_tensor_extras[alloc_index];
+ g_temp_tensor_extra_index = (g_temp_tensor_extra_index + 1) % GGML_DEFAULT_GRAPH_SIZE;
+ ggml_tensor_extra_gpu * extra = &g_temp_tensor_extras[alloc_index];
memset(extra, 0, sizeof(*extra));
return extra;
}
-void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scratch, bool force_inplace, bool no_alloc) {
+static void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scratch, bool force_inplace, bool no_alloc) {
if (scratch && g_scratch_size == 0) {
return;
}
+ tensor->backend = GGML_BACKEND_GPU;
+
// recursively assign CUDA buffers until a compute tensor is found
if (tensor->src[0] != nullptr && tensor->src[0]->backend == GGML_BACKEND_CPU) {
const ggml_op src0_op = tensor->src[0]->op;
ggml_cuda_assign_buffers_impl(tensor->src[1], scratch, force_inplace, no_alloc);
}
- tensor->backend = GGML_BACKEND_GPU;
-
if (scratch && no_alloc) {
return;
}
- struct ggml_tensor_extra_gpu * extra;
+ ggml_tensor_extra_gpu * extra;
const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) ||
tensor->op == GGML_OP_VIEW ||
force_inplace;
const size_t size = ggml_nbytes(tensor);
- CUDA_CHECK(cudaSetDevice(g_main_device));
+ CUDA_CHECK(ggml_cuda_set_device(g_main_device));
if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) {
- struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra;
+ ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra;
char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
size_t offset = 0;
if (tensor->op == GGML_OP_VIEW) {
extra = ggml_cuda_alloc_temp_tensor_extra();
extra->data_device[g_main_device] = src0_ddc + offset;
} else if (tensor->op == GGML_OP_CPY) {
- struct ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu * ) tensor->src[1]->extra;
+ ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu * ) tensor->src[1]->extra;
void * src1_ddv = src1_extra->data_device[g_main_device];
extra = ggml_cuda_alloc_temp_tensor_extra();
extra->data_device[g_main_device] = src1_ddv;
return;
}
if (g_scratch_buffer == nullptr) {
+ ggml_cuda_set_device(g_main_device);
CUDA_CHECK(cudaMalloc(&g_scratch_buffer, g_scratch_size));
}
- struct ggml_tensor_extra_gpu * extra = ggml_cuda_alloc_temp_tensor_extra();
+ ggml_tensor_extra_gpu * extra = ggml_cuda_alloc_temp_tensor_extra();
const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) ||
tensor->op == GGML_OP_VIEW;
if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) {
- struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra;
+ ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra;
char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
size_t view_offset = 0;
if (tensor->op == GGML_OP_VIEW) {
tensor->extra = extra;
}
+void ggml_cuda_copy_to_device(struct ggml_tensor * tensor) {
+ GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
+ GGML_ASSERT(ggml_is_contiguous(tensor));
+
+ ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
+ CUDA_CHECK(ggml_cuda_set_device(g_main_device));
+ CUDA_CHECK(cudaMemcpy(extra->data_device[g_main_device], tensor->data, ggml_nbytes(tensor), cudaMemcpyHostToDevice));
+}
+
void ggml_cuda_assign_buffers(struct ggml_tensor * tensor) {
ggml_cuda_assign_buffers_impl(tensor, true, false, false);
}
ggml_cuda_assign_buffers_impl(tensor, false, true, false);
}
-void ggml_cuda_set_main_device(int main_device) {
+void ggml_cuda_set_main_device(const int main_device) {
if (main_device >= g_device_count) {
fprintf(stderr, "warning: cannot set main_device=%d because there are only %d devices. Using device %d instead.\n",
main_device, g_device_count, g_main_device);
}
}
-void ggml_cuda_set_mul_mat_q(bool mul_mat_q) {
- g_mul_mat_q = mul_mat_q;
-}
-
-void ggml_cuda_set_scratch_size(size_t scratch_size) {
- g_scratch_size = scratch_size;
+void ggml_cuda_set_scratch_size(const size_t scratch_size) {
+ // this is a hack to not completely break llama.cpp when using multiple models or contexts simultaneously
+ // it still won't always work as expected, but it's better than nothing
+ if (scratch_size > g_scratch_size) {
+ ggml_cuda_free_scratch();
+ }
+ g_scratch_size = std::max(g_scratch_size, scratch_size);
}
void ggml_cuda_free_scratch() {
g_scratch_buffer = nullptr;
}
-bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor){
+bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
ggml_cuda_func_t func;
const bool any_on_device = tensor->backend == GGML_BACKEND_GPU
|| (tensor->src[0] != nullptr && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT))
|| (tensor->src[1] != nullptr && tensor->src[1]->backend == GGML_BACKEND_GPU);
+ if (!any_on_device && tensor->op != GGML_OP_MUL_MAT) {
+ return false;
+ }
+
switch (tensor->op) {
+ case GGML_OP_REPEAT:
+ func = ggml_cuda_repeat;
+ break;
+ case GGML_OP_GET_ROWS:
+ func = ggml_cuda_get_rows;
+ break;
case GGML_OP_DUP:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_dup;
break;
case GGML_OP_ADD:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_add;
break;
case GGML_OP_MUL:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_mul;
break;
case GGML_OP_UNARY:
switch (ggml_get_unary_op(tensor)) {
case GGML_UNARY_OP_GELU:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_gelu;
break;
case GGML_UNARY_OP_SILU:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_silu;
break;
default:
return false;
} break;
case GGML_OP_NORM:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_norm;
break;
case GGML_OP_RMS_NORM:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_rms_norm;
break;
case GGML_OP_MUL_MAT:
func = ggml_cuda_mul_mat;
break;
case GGML_OP_SCALE:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_scale;
break;
- case GGML_OP_CPY:
+ case GGML_OP_CLAMP:
if (!any_on_device) {
return false;
}
+ func = ggml_cuda_clamp;
+ break;
+ case GGML_OP_CPY:
func = ggml_cuda_cpy;
break;
case GGML_OP_CONT:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_dup;
break;
case GGML_OP_RESHAPE:
case GGML_OP_VIEW:
case GGML_OP_PERMUTE:
case GGML_OP_TRANSPOSE:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_nop;
break;
case GGML_OP_DIAG_MASK_INF:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_diag_mask_inf;
break;
case GGML_OP_SOFT_MAX:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_soft_max;
break;
case GGML_OP_ROPE:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_rope;
break;
case GGML_OP_ALIBI:
- if (!any_on_device) {
- return false;
- }
func = ggml_cuda_alibi;
break;
default:
CUDA_CHECK(cudaGetDeviceProperties(&prop, device));
snprintf(description, description_size, "%s", prop.name);
}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// backend interface
+
+#define UNUSED GGML_UNUSED
+
+struct ggml_backend_context_cuda {
+};
+
+static const char * ggml_backend_cuda_name(ggml_backend_t backend) {
+ return GGML_CUDA_NAME;
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cuda_free(ggml_backend_t backend) {
+ ggml_backend_context_cuda * cuda_ctx = (ggml_backend_context_cuda *)backend->context;
+ delete cuda_ctx;
+ delete backend;
+}
+
+struct ggml_backend_buffer_context_cuda {
+ void * device;
+
+ ggml_tensor_extra_gpu * temp_tensor_extras = nullptr;
+ size_t temp_tensor_extra_index = 0;
+
+ ~ggml_backend_buffer_context_cuda() {
+ delete[] temp_tensor_extras;
+ }
+
+ ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() {
+ if (temp_tensor_extras == nullptr) {
+ temp_tensor_extras = new ggml_tensor_extra_gpu[GGML_DEFAULT_GRAPH_SIZE];
+ }
+
+ size_t alloc_index = temp_tensor_extra_index;
+ temp_tensor_extra_index = (temp_tensor_extra_index + 1) % GGML_DEFAULT_GRAPH_SIZE;
+ ggml_tensor_extra_gpu * extra = &temp_tensor_extras[alloc_index];
+ memset(extra, 0, sizeof(*extra));
+
+ return extra;
+ }
+};
+
+static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+ ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context;
+ CUDA_CHECK(cudaFree(ctx->device));
+ delete ctx;
+}
+
+static void * ggml_backend_cuda_buffer_get_base(ggml_backend_buffer_t buffer) {
+ ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context;
+ return ctx->device;
+}
+
+static size_t ggml_backend_cuda_buffer_get_alloc_size(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
+ int64_t row_low = 0;
+ int64_t row_high = ggml_nrows(tensor);
+ int64_t nrows_split = row_high - row_low;
+
+ size_t size = ggml_nbytes_split(tensor, nrows_split);
+
+ int64_t ne0 = tensor->ne[0];
+
+ if (ggml_is_quantized(tensor->type)) {
+ if (ne0 % MATRIX_ROW_PADDING != 0) {
+ size += (MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING)
+ * ggml_type_size(tensor->type)/ggml_blck_size(tensor->type);
+ }
+ }
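+ // the extra bytes round the last row up to a multiple of MATRIX_ROW_PADDING elements,
+ // so the quantized kernels cannot read out of bounds on the last row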
+
+ return size;
+
+ UNUSED(buffer);
+}
+
+static void ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
+ ggml_backend_buffer_context_cuda * ctx = (ggml_backend_buffer_context_cuda *)buffer->context;
+
+ if (tensor->view_src != NULL && tensor->view_offs == 0) {
+ assert(tensor->view_src->buffer->backend == buffer->backend);
+ tensor->backend = tensor->view_src->backend;
+ tensor->extra = tensor->view_src->extra;
+ return;
+ }
+
+ ggml_tensor_extra_gpu * extra = ctx->ggml_cuda_alloc_temp_tensor_extra();
+
+ extra->data_device[g_main_device] = tensor->data;
+
+ tensor->backend = GGML_BACKEND_GPU;
+ tensor->extra = extra;
+
+ if (ggml_is_quantized(tensor->type)) {
+ // initialize padding to 0 to avoid possible NaN values
+ int64_t row_low = 0;
+ int64_t row_high = ggml_nrows(tensor);
+ int64_t nrows_split = row_high - row_low;
+
+ size_t original_size = ggml_nbytes_split(tensor, nrows_split);
+ size_t padded_size = ggml_backend_cuda_buffer_get_alloc_size(tensor->buffer, tensor);
+
+ if (padded_size > original_size && tensor->view_src == nullptr) {
+ CUDA_CHECK(cudaMemsetAsync((char *)tensor->data + original_size, 0, padded_size - original_size, g_cudaStreams[g_main_device][0]));
+ }
+ }
+
+ UNUSED(buffer);
+}
+
+static struct ggml_backend_buffer_i cuda_backend_buffer_interface = {
+ /* .free_buffer = */ ggml_backend_cuda_buffer_free_buffer,
+ /* .get_base = */ ggml_backend_cuda_buffer_get_base,
+ /* .get_alloc_size = */ ggml_backend_cuda_buffer_get_alloc_size,
+ /* .init_tensor = */ ggml_backend_cuda_buffer_init_tensor,
+ /* .free_tensor = */ NULL,
+};
+
+static ggml_backend_buffer_t ggml_backend_cuda_alloc_buffer(ggml_backend_t backend, size_t size) {
+ ggml_cuda_set_device(g_main_device);
+
+ ggml_backend_buffer_context_cuda * ctx = new ggml_backend_buffer_context_cuda;
+
+ size = std::max(size, (size_t)1); // cudaMalloc returns null for size 0
+
+ CUDA_CHECK(cudaMalloc(&ctx->device, size));
+
+ return ggml_backend_buffer_init(backend, cuda_backend_buffer_interface, ctx, size);
+}
+
+static size_t ggml_backend_cuda_get_alignment(ggml_backend_t backend) {
+ return 128;
+ UNUSED(backend);
+}
+
+static void ggml_backend_cuda_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+ GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
+
+ CUDA_CHECK(cudaMemcpyAsync((char *)tensor->data + offset, data, size, cudaMemcpyHostToDevice, g_cudaStreams[g_main_device][0]));
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cuda_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+ GGML_ASSERT(tensor->backend == GGML_BACKEND_GPU);
+
+ CUDA_CHECK(cudaMemcpyAsync(data, (const char *)tensor->data + offset, size, cudaMemcpyDeviceToHost, g_cudaStreams[g_main_device][0]));
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_cuda_synchronize(ggml_backend_t backend) {
+ CUDA_CHECK(cudaStreamSynchronize(g_cudaStreams[g_main_device][0]));
+
+ UNUSED(backend);
+}
+
+static ggml_backend_graph_plan_t ggml_backend_cuda_graph_plan_create(ggml_backend_t backend, ggml_cgraph * cgraph) {
+ GGML_ASSERT(!"not implemented");
+
+ return nullptr;
+
+ UNUSED(backend);
+ UNUSED(cgraph);
+}
+
+static void ggml_backend_cuda_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+ GGML_ASSERT(!"not implemented");
+
+ UNUSED(backend);
+ UNUSED(plan);
+}
+
+static void ggml_backend_cuda_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
+ GGML_ASSERT(!"not implemented");
+
+ UNUSED(backend);
+ UNUSED(plan);
+}
+
+static void ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
+ ggml_cuda_set_device(g_main_device);
+
+ ggml_compute_params params = {};
+ params.type = GGML_TASK_COMPUTE;
+ params.ith = 0;
+ for (int i = 0; i < cgraph->n_nodes; i++) {
+ ggml_tensor * node = cgraph->nodes[i];
+
+ if (node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE)
+ continue;
+ assert(node->backend == GGML_BACKEND_GPU);
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ if (node->src[j] != nullptr) {
+ assert(node->src[j]->backend == GGML_BACKEND_GPU);
+ }
+ }
+
+ bool ok = ggml_cuda_compute_forward(¶ms, node);
+ if (!ok) {
+ fprintf(stderr, "%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
+ }
+ GGML_ASSERT(ok);
+
+#if 0
+ if (node->type == GGML_TYPE_F32) {
+ cudaDeviceSynchronize();
+ std::vector<float> tmp(ggml_nelements(node), 0.0f);
+ cudaMemcpy(tmp.data(), node->data, ggml_nelements(node)*sizeof(float), cudaMemcpyDeviceToHost);
+ printf("\n%s (%s) (%s %s) (%s %s): ", node->name, ggml_op_name(node->op),
+ ggml_type_name(node->src[0]->type),
+ node->src[1] ? ggml_type_name(node->src[1]->type) : "none",
+ node->src[0]->name,
+ node->src[1] ? node->src[1]->name : "none");
+ double sum = 0.0;
+ double sq_sum = 0.0;
+ for (int i = 0; i < ggml_nelements(node); i++) {
+ printf("%f ", tmp[i]);
+ sum += tmp[i];
+ sq_sum += tmp[i]*tmp[i];
+ }
+ printf("\n");
+ printf("sum: %f, ", sum);
+ printf("sq_sum: %f\n", sq_sum);
+ }
+#endif
+ }
+
+ UNUSED(backend);
+}
+
+static ggml_backend_i cuda_backend_i = {
+ /* .get_name = */ ggml_backend_cuda_name,
+ /* .free = */ ggml_backend_cuda_free,
+ /* .alloc_buffer = */ ggml_backend_cuda_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_cuda_get_alignment,
+ /* .set_tensor_async = */ ggml_backend_cuda_set_tensor_async,
+ /* .get_tensor_async = */ ggml_backend_cuda_get_tensor_async,
+ /* .synchronize = */ ggml_backend_cuda_synchronize,
+ /* .cpy_tensor_from = */ nullptr,
+ /* .cpy_tensor_to = */ nullptr,
+ /* .graph_plan_create = */ ggml_backend_cuda_graph_plan_create,
+ /* .graph_plan_free = */ ggml_backend_cuda_graph_plan_free,
+ /* .graph_plan_compute = */ ggml_backend_cuda_graph_plan_compute,
+ /* .graph_compute = */ ggml_backend_cuda_graph_compute,
+ /* .supports_op = */ nullptr,
+};
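+
+// note: cpy_tensor_from/to and supports_op are not provided yet, and the graph_plan_*
+// callbacks abort with "not implemented"; graph execution currently goes through graph_compute only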
+
+ggml_backend_t ggml_backend_cuda_init() {
+ ggml_init_cublas(); // TODO: remove from ggml.c
+
+ ggml_backend_context_cuda * ctx = new ggml_backend_context_cuda;
+
+ ggml_backend_t cuda_backend = new ggml_backend {
+ /* .interface = */ cuda_backend_i,
+ /* .context = */ ctx
+ };
+
+ return cuda_backend;
+}
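+
+// usage sketch (illustrative; uses the generic API declared in ggml-backend.h):
+//
+//     ggml_backend_t backend = ggml_backend_cuda_init();
+//     ggml_backend_buffer_t buf = ggml_backend_alloc_buffer(backend, buf_size);
+//     // ... allocate the graph tensors from buf, upload inputs with ggml_backend_tensor_set ...
+//     ggml_backend_graph_compute(backend, &gf);
+//     ggml_backend_free(backend);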
#pragma once
#include "ggml.h"
+#include "ggml-backend.h"
#ifdef GGML_USE_HIPBLAS
#define GGML_CUDA_NAME "ROCm"
GGML_API void ggml_cuda_assign_buffers_no_alloc(struct ggml_tensor * tensor);
GGML_API void ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset);
+GGML_API void ggml_cuda_copy_to_device(struct ggml_tensor * tensor);
GGML_API void ggml_cuda_set_main_device(int main_device);
GGML_API void ggml_cuda_set_mul_mat_q(bool mul_mat_q);
GGML_API int ggml_cuda_get_device_count(void);
GGML_API void ggml_cuda_get_device_description(int device, char * description, size_t description_size);
+// backend API
+GGML_API ggml_backend_t ggml_backend_cuda_init(void); // TODO: take a list of devices to use
+
#ifdef __cplusplus
}
#endif
--- /dev/null
+#pragma once
+
+#include "ggml.h"
+
+// GGML internal header
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <string.h> // memcpy
+#include <math.h> // fabsf
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// static_assert should be a #define, but if it's not,
+// fall back to the _Static_assert C11 keyword.
+// if C99 - static_assert is noop
+// ref: https://stackoverflow.com/a/53923785/4039976
+#ifndef static_assert
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
+#define static_assert(cond, msg) _Static_assert(cond, msg)
+#else
+#define static_assert(cond, msg) struct global_scope_noop_trick
+#endif
+#endif
+
+// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
+#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
+#ifndef __FMA__
+#define __FMA__
+#endif
+#ifndef __F16C__
+#define __F16C__
+#endif
+#ifndef __SSE3__
+#define __SSE3__
+#endif
+#endif
+
+// 16-bit float
+// on Arm, we use __fp16
+// on x86, we use uint16_t
+#if defined(__ARM_NEON) && !defined(_MSC_VER)
+
+// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
+//
+// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
+//
+#include <arm_neon.h>
+
+#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
+#define GGML_COMPUTE_FP32_TO_FP16(x) (x)
+
+#define GGML_FP16_TO_FP32(x) ((float) (x))
+#define GGML_FP32_TO_FP16(x) (x)
+
+#else
+
+#ifdef __wasm_simd128__
+#include <wasm_simd128.h>
+#else
+#ifdef __POWER9_VECTOR__
+#include <altivec.h>
+#undef bool
+#define bool _Bool
+#else
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#include <intrin.h>
+#else
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
+#if !defined(__riscv)
+#include <immintrin.h>
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifdef __riscv_v_intrinsic
+#include <riscv_vector.h>
+#endif
+
+#ifdef __F16C__
+
+#ifdef _MSC_VER
+#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
+#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
+#else
+#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
+#endif
+
+#elif defined(__POWER9_VECTOR__)
+
+#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+/* the inline asm below is about 12% faster than the lookup method */
+#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
+#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+
+static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
+ register float f;
+ register double d;
+ __asm__(
+ "mtfprd %0,%2\n"
+ "xscvhpdp %0,%0\n"
+ "frsp %1,%0\n" :
+ /* temp */ "=d"(d),
+ /* out */ "=f"(f):
+ /* in */ "r"(h));
+ return f;
+}
+
+static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
+ register double d;
+ register ggml_fp16_t r;
+ __asm__( /* xscvdphp can work on double or single precision */
+ "xscvdphp %0,%2\n"
+ "mffprd %1,%0\n" :
+ /* temp */ "=d"(d),
+ /* out */ "=r"(r):
+ /* in */ "f"(f));
+ return r;
+}
+
+#else
+
+// FP16 <-> FP32
+// ref: https://github.com/Maratyszcza/FP16
+
+static inline float fp32_from_bits(uint32_t w) {
+ union {
+ uint32_t as_bits;
+ float as_value;
+ } fp32;
+ fp32.as_bits = w;
+ return fp32.as_value;
+}
+
+static inline uint32_t fp32_to_bits(float f) {
+ union {
+ float as_value;
+ uint32_t as_bits;
+ } fp32;
+ fp32.as_value = f;
+ return fp32.as_bits;
+}
+
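+// worked example (illustrative): h = 0x3C00 (fp16 1.0) gives w = 0x3C000000 and two_w = 0x78000000,
+// which is above the denormal cutoff, so the normalized path is taken:
+// fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale = 2^112 * 2^-112 = 1.0f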
+static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
+ const uint32_t w = (uint32_t) h << 16;
+ const uint32_t sign = w & UINT32_C(0x80000000);
+ const uint32_t two_w = w + w;
+
+ const uint32_t exp_offset = UINT32_C(0xE0) << 23;
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
+ const float exp_scale = 0x1.0p-112f;
+#else
+ const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
+#endif
+ const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
+
+ const uint32_t magic_mask = UINT32_C(126) << 23;
+ const float magic_bias = 0.5f;
+ const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
+
+ const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
+ const uint32_t result = sign |
+ (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
+ return fp32_from_bits(result);
+}
+
+static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
+ const float scale_to_inf = 0x1.0p+112f;
+ const float scale_to_zero = 0x1.0p-110f;
+#else
+ const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
+ const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
+#endif
+ float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
+
+ const uint32_t w = fp32_to_bits(f);
+ const uint32_t shl1_w = w + w;
+ const uint32_t sign = w & UINT32_C(0x80000000);
+ uint32_t bias = shl1_w & UINT32_C(0xFF000000);
+ if (bias < UINT32_C(0x71000000)) {
+ bias = UINT32_C(0x71000000);
+ }
+
+ base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
+ const uint32_t bits = fp32_to_bits(base);
+ const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
+ const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
+ const uint32_t nonsign = exp_bits + mantissa_bits;
+ return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
+}
+
+#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
+#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
+
+#endif // __F16C__
+
+#endif // __ARM_NEON
+
+// precomputed f32 table for f16 (256 KB)
+// defined in ggml.c, initialized in ggml_init()
+extern float ggml_table_f32_f16[1 << 16];
+
+// On ARM NEON it is quicker to convert fp16 <-> fp32 directly in hardware than to go through the
+// lookup table in ggml_lookup_fp16_to_fp32, so GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 are defined
+// elsewhere for NEON. The same holds for POWER9.
+#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)
+
+inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
+ uint16_t s;
+ memcpy(&s, &f, sizeof(uint16_t));
+ return ggml_table_f32_f16[s];
+}
+
+#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
+#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
+
+#endif
+
+#define GGML_HASHTABLE_FULL ((size_t)-1)
+#define GGML_HASHTABLE_ALREADY_EXISTS ((size_t)-2)
+
+bool ggml_hash_contains (const struct ggml_hash_set hash_set, struct ggml_tensor * key);
+
+// returns GGML_HASHTABLE_FULL if table is full, otherwise the current index of the key or where it should be inserted
+size_t ggml_hash_find (const struct ggml_hash_set hash_set, struct ggml_tensor * key);
+
+// returns GGML_HASHTABLE_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
+size_t ggml_hash_insert ( struct ggml_hash_set hash_set, struct ggml_tensor * key);
+
+// return index, asserts if table is full
+size_t ggml_hash_find_or_insert( struct ggml_hash_set hash_set, struct ggml_tensor * key);
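+
+// usage sketch (illustrative): visiting each tensor of a graph exactly once
+//
+//     if (ggml_hash_insert(hash_set, node) == GGML_HASHTABLE_ALREADY_EXISTS) {
+//         return; // node was already visited
+//     }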
+
+#ifdef __cplusplus
+}
+#endif
#pragma once
+#include "ggml.h"
+#include "ggml-backend.h"
+
#include <stddef.h>
#include <stdbool.h>
extern "C" {
#endif
+//
+// internal API
+// temporary exposed to user-code
+//
+
struct ggml_metal_context;
+void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data);
+
// number of command buffers to use
struct ggml_metal_context * ggml_metal_init(int n_cb);
void ggml_metal_free(struct ggml_metal_context * ctx);
// creates gf->n_threads command buffers in parallel
void ggml_metal_graph_compute(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);
+//
+// backend API
+// user-code should use only these functions
+//
+
+GGML_API ggml_backend_t ggml_backend_metal_init(void);
+
+GGML_API bool ggml_backend_is_metal(ggml_backend_t backend);
+
+GGML_API void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb);
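+
+// usage sketch (illustrative):
+//
+//     ggml_backend_t backend = ggml_backend_metal_init();
+//     if (ggml_backend_is_metal(backend)) {
+//         ggml_backend_metal_set_n_cb(backend, 4); // 4 command buffers is an arbitrary example value
+//     }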
+
#ifdef __cplusplus
}
#endif
#import "ggml-metal.h"
+#import "ggml-backend-impl.h"
#import "ggml.h"
#import <Foundation/Foundation.h>
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
-// TODO: temporary - reuse llama.cpp logging
#ifdef GGML_METAL_NDEBUG
-#define metal_printf(...)
+#define GGML_METAL_LOG_INFO(...)
+#define GGML_METAL_LOG_WARN(...)
+#define GGML_METAL_LOG_ERROR(...)
#else
-#define metal_printf(...) fprintf(stderr, __VA_ARGS__)
+#define GGML_METAL_LOG_INFO(...) ggml_metal_log(GGML_LOG_LEVEL_INFO, __VA_ARGS__)
+#define GGML_METAL_LOG_WARN(...) ggml_metal_log(GGML_LOG_LEVEL_WARN, __VA_ARGS__)
+#define GGML_METAL_LOG_ERROR(...) ggml_metal_log(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
#endif
#define UNUSED(x) (void)(x)
-#define GGML_MAX_CONCUR (2*GGML_MAX_NODES)
+#define GGML_MAX_CONCUR (2*GGML_DEFAULT_GRAPH_SIZE)
struct ggml_metal_buffer {
const char * name;
GGML_METAL_DECL_KERNEL(mul);
GGML_METAL_DECL_KERNEL(mul_row); // TODO: avoid this extra kernel, instead extend the "mul" kernel to support broadcast
GGML_METAL_DECL_KERNEL(scale);
+ GGML_METAL_DECL_KERNEL(scale_4);
GGML_METAL_DECL_KERNEL(silu);
GGML_METAL_DECL_KERNEL(relu);
GGML_METAL_DECL_KERNEL(gelu);
GGML_METAL_DECL_KERNEL(get_rows_f16);
GGML_METAL_DECL_KERNEL(get_rows_q4_0);
GGML_METAL_DECL_KERNEL(get_rows_q4_1);
+ GGML_METAL_DECL_KERNEL(get_rows_q5_0);
+ GGML_METAL_DECL_KERNEL(get_rows_q5_1);
GGML_METAL_DECL_KERNEL(get_rows_q8_0);
GGML_METAL_DECL_KERNEL(get_rows_q2_K);
GGML_METAL_DECL_KERNEL(get_rows_q3_K);
GGML_METAL_DECL_KERNEL(get_rows_q6_K);
GGML_METAL_DECL_KERNEL(rms_norm);
GGML_METAL_DECL_KERNEL(norm);
- GGML_METAL_DECL_KERNEL(mul_mat_f32_f32);
- GGML_METAL_DECL_KERNEL(mul_mat_f16_f32);
- GGML_METAL_DECL_KERNEL(mul_mat_f16_f32_1row);
- GGML_METAL_DECL_KERNEL(mul_mat_f16_f32_l4);
- GGML_METAL_DECL_KERNEL(mul_mat_q4_0_f32);
- GGML_METAL_DECL_KERNEL(mul_mat_q4_1_f32);
- GGML_METAL_DECL_KERNEL(mul_mat_q8_0_f32);
- GGML_METAL_DECL_KERNEL(mul_mat_q2_K_f32);
- GGML_METAL_DECL_KERNEL(mul_mat_q3_K_f32);
- GGML_METAL_DECL_KERNEL(mul_mat_q4_K_f32);
- GGML_METAL_DECL_KERNEL(mul_mat_q5_K_f32);
- GGML_METAL_DECL_KERNEL(mul_mat_q6_K_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_f32_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_f16_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_f16_f32_1row);
+ GGML_METAL_DECL_KERNEL(mul_mv_f16_f32_l4);
+ GGML_METAL_DECL_KERNEL(mul_mv_q4_0_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_q4_1_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_q5_0_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_q5_1_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_q8_0_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_q2_K_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_q3_K_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_q4_K_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_q5_K_f32);
+ GGML_METAL_DECL_KERNEL(mul_mv_q6_K_f32);
GGML_METAL_DECL_KERNEL(mul_mm_f32_f32);
GGML_METAL_DECL_KERNEL(mul_mm_f16_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q4_0_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q4_1_f32);
+ GGML_METAL_DECL_KERNEL(mul_mm_q5_0_f32);
+ GGML_METAL_DECL_KERNEL(mul_mm_q5_1_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q8_0_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q2_K_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q3_K_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q4_K_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q5_K_f32);
GGML_METAL_DECL_KERNEL(mul_mm_q6_K_f32);
- GGML_METAL_DECL_KERNEL(rope);
+ GGML_METAL_DECL_KERNEL(rope_f32);
+ GGML_METAL_DECL_KERNEL(rope_f16);
GGML_METAL_DECL_KERNEL(alibi_f32);
GGML_METAL_DECL_KERNEL(cpy_f32_f16);
GGML_METAL_DECL_KERNEL(cpy_f32_f32);
GGML_METAL_DECL_KERNEL(cpy_f16_f16);
+ GGML_METAL_DECL_KERNEL(concat);
+ GGML_METAL_DECL_KERNEL(sqr);
#undef GGML_METAL_DECL_KERNEL
};
@implementation GGMLMetalClass
@end
+ggml_log_callback ggml_metal_log_callback = NULL;
+void * ggml_metal_log_user_data = NULL;
+
+void ggml_metal_log_set_callback(ggml_log_callback log_callback, void * user_data) {
+ ggml_metal_log_callback = log_callback;
+ ggml_metal_log_user_data = user_data;
+}
+
+static void ggml_metal_log(enum ggml_log_level level, const char* format, ...){
+ if (ggml_metal_log_callback != NULL) {
+ va_list args;
+ va_start(args, format);
+ // vsnprintf consumes the va_list, so keep a copy for the retry with a heap buffer
+ va_list args_copy;
+ va_copy(args_copy, args);
+ char buffer[128];
+ int len = vsnprintf(buffer, 128, format, args);
+ if (len < 128) {
+ ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data);
+ } else {
+ char* buffer2 = malloc(len+1);
+ vsnprintf(buffer2, len+1, format, args_copy);
+ buffer2[len] = 0;
+ ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data);
+ free(buffer2);
+ }
+ va_end(args_copy);
+ va_end(args);
+ }
+}
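+
+// usage sketch (illustrative; my_metal_log_cb is a hypothetical user callback):
+//
+//     static void my_metal_log_cb(enum ggml_log_level level, const char * text, void * user_data) {
+//         (void) level; (void) user_data;
+//         fputs(text, stderr);
+//     }
+//     ...
+//     ggml_metal_log_set_callback(my_metal_log_cb, NULL);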
+
struct ggml_metal_context * ggml_metal_init(int n_cb) {
- metal_printf("%s: allocating\n", __func__);
+ GGML_METAL_LOG_INFO("%s: allocating\n", __func__);
id <MTLDevice> device;
NSString * s;
NSArray * devices = MTLCopyAllDevices();
for (device in devices) {
s = [device name];
- metal_printf("%s: found device: %s\n", __func__, [s UTF8String]);
+ GGML_METAL_LOG_INFO("%s: found device: %s\n", __func__, [s UTF8String]);
}
#endif
// Pick and show default Metal device
device = MTLCreateSystemDefaultDevice();
s = [device name];
- metal_printf("%s: picking default device: %s\n", __func__, [s UTF8String]);
+ GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [s UTF8String]);
// Configure context
struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context));
ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT);
-#ifdef GGML_SWIFT
- // load the default.metallib file
+ // load library
{
- NSError * error = nil;
-
- NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
- NSString * llamaBundlePath = [bundle pathForResource:@"llama_llama" ofType:@"bundle"];
- NSBundle * llamaBundle = [NSBundle bundleWithPath:llamaBundlePath];
- NSString * libPath = [llamaBundle pathForResource:@"default" ofType:@"metallib"];
- NSURL * libURL = [NSURL fileURLWithPath:libPath];
-
- // Load the metallib file into a Metal library
- ctx->library = [ctx->device newLibraryWithURL:libURL error:&error];
-
- if (error) {
- metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]);
- return NULL;
- }
- }
+ NSBundle * bundle = nil;
+#ifdef SWIFT_PACKAGE
+ bundle = SWIFTPM_MODULE_BUNDLE;
#else
- UNUSED(msl_library_source);
-
- // read the source from "ggml-metal.metal" into a string and use newLibraryWithSource
- {
+ bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
+#endif
NSError * error = nil;
+ NSString * libPath = [bundle pathForResource:@"default" ofType:@"metallib"];
+ if (libPath != nil) {
+ NSURL * libURL = [NSURL fileURLWithPath:libPath];
+ GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [libPath UTF8String]);
+ ctx->library = [ctx->device newLibraryWithURL:libURL error:&error];
+ } else {
+ GGML_METAL_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__);
+
+ NSString * sourcePath;
+ NSString * ggmlMetalPathResources = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"];
+ if (ggmlMetalPathResources) {
+ sourcePath = [ggmlMetalPathResources stringByAppendingPathComponent:@"ggml-metal.metal"];
+ } else {
+ sourcePath = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
+ }
+ if (sourcePath == nil) {
+ GGML_METAL_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__);
+ sourcePath = @"ggml-metal.metal";
+ }
+ GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [sourcePath UTF8String]);
+ NSString * src = [NSString stringWithContentsOfFile:sourcePath encoding:NSUTF8StringEncoding error:&error];
+ if (error) {
+ GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
+ return NULL;
+ }
- //NSString * path = [[NSBundle mainBundle] pathForResource:@"../../examples/metal/metal" ofType:@"metal"];
- NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
- NSString * path = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
- metal_printf("%s: loading '%s'\n", __func__, [path UTF8String]);
-
- NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error];
- if (error) {
- metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]);
- return NULL;
- }
-
+ MTLCompileOptions* options = nil;
#ifdef GGML_QKK_64
- MTLCompileOptions* options = [MTLCompileOptions new];
- options.preprocessorMacros = @{ @"QK_K" : @(64) };
- ctx->library = [ctx->device newLibraryWithSource:src options:options error:&error];
-#else
- ctx->library = [ctx->device newLibraryWithSource:src options:nil error:&error];
+ options = [MTLCompileOptions new];
+ options.preprocessorMacros = @{ @"QK_K" : @(64) };
#endif
+ ctx->library = [ctx->device newLibraryWithSource:src options:options error:&error];
+ }
+
if (error) {
- metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]);
+ GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
return NULL;
}
}
-#endif
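+    // note: the GGML_METAL_PATH_RESOURCES environment variable checked above lets
+    // the shader source be located when the binary is not run from the source
+    // tree, e.g. (hypothetical invocation):
+    //
+    //   GGML_METAL_PATH_RESOURCES=/path/to/ggml ./my-app
+    //
+    // the directory is expected to contain ggml-metal.metal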
// load kernels
{
NSError * error = nil;
+
+ /*
+ GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \
+ (int) ctx->pipeline_##name.maxTotalThreadsPerThreadgroup, \
+ (int) ctx->pipeline_##name.threadExecutionWidth); \
+ */
#define GGML_METAL_ADD_KERNEL(name) \
ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \
ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \
- metal_printf("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \
- (int) ctx->pipeline_##name.maxTotalThreadsPerThreadgroup, \
- (int) ctx->pipeline_##name.threadExecutionWidth); \
if (error) { \
- metal_printf("%s: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
+ GGML_METAL_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
return NULL; \
}
GGML_METAL_ADD_KERNEL(mul);
GGML_METAL_ADD_KERNEL(mul_row);
GGML_METAL_ADD_KERNEL(scale);
+ GGML_METAL_ADD_KERNEL(scale_4);
GGML_METAL_ADD_KERNEL(silu);
GGML_METAL_ADD_KERNEL(relu);
GGML_METAL_ADD_KERNEL(gelu);
GGML_METAL_ADD_KERNEL(get_rows_f16);
GGML_METAL_ADD_KERNEL(get_rows_q4_0);
GGML_METAL_ADD_KERNEL(get_rows_q4_1);
+ GGML_METAL_ADD_KERNEL(get_rows_q5_0);
+ GGML_METAL_ADD_KERNEL(get_rows_q5_1);
GGML_METAL_ADD_KERNEL(get_rows_q8_0);
GGML_METAL_ADD_KERNEL(get_rows_q2_K);
GGML_METAL_ADD_KERNEL(get_rows_q3_K);
GGML_METAL_ADD_KERNEL(get_rows_q6_K);
GGML_METAL_ADD_KERNEL(rms_norm);
GGML_METAL_ADD_KERNEL(norm);
- GGML_METAL_ADD_KERNEL(mul_mat_f32_f32);
- GGML_METAL_ADD_KERNEL(mul_mat_f16_f32);
- GGML_METAL_ADD_KERNEL(mul_mat_f16_f32_1row);
- GGML_METAL_ADD_KERNEL(mul_mat_f16_f32_l4);
- GGML_METAL_ADD_KERNEL(mul_mat_q4_0_f32);
- GGML_METAL_ADD_KERNEL(mul_mat_q4_1_f32);
- GGML_METAL_ADD_KERNEL(mul_mat_q8_0_f32);
- GGML_METAL_ADD_KERNEL(mul_mat_q2_K_f32);
- GGML_METAL_ADD_KERNEL(mul_mat_q3_K_f32);
- GGML_METAL_ADD_KERNEL(mul_mat_q4_K_f32);
- GGML_METAL_ADD_KERNEL(mul_mat_q5_K_f32);
- GGML_METAL_ADD_KERNEL(mul_mat_q6_K_f32);
- GGML_METAL_ADD_KERNEL(mul_mm_f32_f32);
- GGML_METAL_ADD_KERNEL(mul_mm_f16_f32);
- GGML_METAL_ADD_KERNEL(mul_mm_q4_0_f32);
- GGML_METAL_ADD_KERNEL(mul_mm_q8_0_f32);
- GGML_METAL_ADD_KERNEL(mul_mm_q4_1_f32);
- GGML_METAL_ADD_KERNEL(mul_mm_q2_K_f32);
- GGML_METAL_ADD_KERNEL(mul_mm_q3_K_f32);
- GGML_METAL_ADD_KERNEL(mul_mm_q4_K_f32);
- GGML_METAL_ADD_KERNEL(mul_mm_q5_K_f32);
- GGML_METAL_ADD_KERNEL(mul_mm_q6_K_f32);
- GGML_METAL_ADD_KERNEL(rope);
+ GGML_METAL_ADD_KERNEL(mul_mv_f32_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_f16_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_f16_f32_1row);
+ GGML_METAL_ADD_KERNEL(mul_mv_f16_f32_l4);
+ GGML_METAL_ADD_KERNEL(mul_mv_q4_0_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_q4_1_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_q5_0_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_q5_1_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_q8_0_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_q2_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_q3_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_q4_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_q5_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mv_q6_K_f32);
+ if ([ctx->device supportsFamily:MTLGPUFamilyApple7]) {
+ GGML_METAL_ADD_KERNEL(mul_mm_f32_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_f16_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_q4_0_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_q4_1_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_q5_0_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_q5_1_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_q8_0_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_q2_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_q3_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_q4_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_q5_K_f32);
+ GGML_METAL_ADD_KERNEL(mul_mm_q6_K_f32);
+ }
+ GGML_METAL_ADD_KERNEL(rope_f32);
+ GGML_METAL_ADD_KERNEL(rope_f16);
GGML_METAL_ADD_KERNEL(alibi_f32);
GGML_METAL_ADD_KERNEL(cpy_f32_f16);
GGML_METAL_ADD_KERNEL(cpy_f32_f32);
GGML_METAL_ADD_KERNEL(cpy_f16_f16);
+ GGML_METAL_ADD_KERNEL(concat);
+ GGML_METAL_ADD_KERNEL(sqr);
#undef GGML_METAL_ADD_KERNEL
}
- metal_printf("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
#if TARGET_OS_OSX
- metal_printf("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
+ // print MTL GPU family:
+ GGML_METAL_LOG_INFO("%s: GPU name: %s\n", __func__, [[ctx->device name] UTF8String]);
+
+ // determine max supported GPU family
+ // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
+ // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
+ for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) {
+ if ([ctx->device supportsFamily:i]) {
+ GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - MTLGPUFamilyApple1 + 1, i);
+ break;
+ }
+ }
+
+ GGML_METAL_LOG_INFO("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
+ GGML_METAL_LOG_INFO("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
if (ctx->device.maxTransferRate != 0) {
- metal_printf("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0);
+ GGML_METAL_LOG_INFO("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0);
} else {
- metal_printf("%s: maxTransferRate = built-in GPU\n", __func__);
+ GGML_METAL_LOG_INFO("%s: maxTransferRate = built-in GPU\n", __func__);
}
#endif
}
void ggml_metal_free(struct ggml_metal_context * ctx) {
- metal_printf("%s: deallocating\n", __func__);
+ GGML_METAL_LOG_INFO("%s: deallocating\n", __func__);
#define GGML_METAL_DEL_KERNEL(name) \
[ctx->function_##name release]; \
[ctx->pipeline_##name release];
GGML_METAL_DEL_KERNEL(mul);
GGML_METAL_DEL_KERNEL(mul_row);
GGML_METAL_DEL_KERNEL(scale);
+ GGML_METAL_DEL_KERNEL(scale_4);
GGML_METAL_DEL_KERNEL(silu);
GGML_METAL_DEL_KERNEL(relu);
GGML_METAL_DEL_KERNEL(gelu);
GGML_METAL_DEL_KERNEL(get_rows_f16);
GGML_METAL_DEL_KERNEL(get_rows_q4_0);
GGML_METAL_DEL_KERNEL(get_rows_q4_1);
+ GGML_METAL_DEL_KERNEL(get_rows_q5_0);
+ GGML_METAL_DEL_KERNEL(get_rows_q5_1);
GGML_METAL_DEL_KERNEL(get_rows_q8_0);
GGML_METAL_DEL_KERNEL(get_rows_q2_K);
GGML_METAL_DEL_KERNEL(get_rows_q3_K);
GGML_METAL_DEL_KERNEL(get_rows_q6_K);
GGML_METAL_DEL_KERNEL(rms_norm);
GGML_METAL_DEL_KERNEL(norm);
- GGML_METAL_DEL_KERNEL(mul_mat_f32_f32);
- GGML_METAL_DEL_KERNEL(mul_mat_f16_f32);
- GGML_METAL_DEL_KERNEL(mul_mat_f16_f32_1row);
- GGML_METAL_DEL_KERNEL(mul_mat_f16_f32_l4);
- GGML_METAL_DEL_KERNEL(mul_mat_q4_0_f32);
- GGML_METAL_DEL_KERNEL(mul_mat_q4_1_f32);
- GGML_METAL_DEL_KERNEL(mul_mat_q8_0_f32);
- GGML_METAL_DEL_KERNEL(mul_mat_q2_K_f32);
- GGML_METAL_DEL_KERNEL(mul_mat_q3_K_f32);
- GGML_METAL_DEL_KERNEL(mul_mat_q4_K_f32);
- GGML_METAL_DEL_KERNEL(mul_mat_q5_K_f32);
- GGML_METAL_DEL_KERNEL(mul_mat_q6_K_f32);
- GGML_METAL_DEL_KERNEL(mul_mm_f32_f32);
- GGML_METAL_DEL_KERNEL(mul_mm_f16_f32);
- GGML_METAL_DEL_KERNEL(mul_mm_q4_0_f32);
- GGML_METAL_DEL_KERNEL(mul_mm_q8_0_f32);
- GGML_METAL_DEL_KERNEL(mul_mm_q4_1_f32);
- GGML_METAL_DEL_KERNEL(mul_mm_q2_K_f32);
- GGML_METAL_DEL_KERNEL(mul_mm_q3_K_f32);
- GGML_METAL_DEL_KERNEL(mul_mm_q4_K_f32);
- GGML_METAL_DEL_KERNEL(mul_mm_q5_K_f32);
- GGML_METAL_DEL_KERNEL(mul_mm_q6_K_f32);
- GGML_METAL_DEL_KERNEL(rope);
+ GGML_METAL_DEL_KERNEL(mul_mv_f32_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_f16_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_f16_f32_1row);
+ GGML_METAL_DEL_KERNEL(mul_mv_f16_f32_l4);
+ GGML_METAL_DEL_KERNEL(mul_mv_q4_0_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_q4_1_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_q5_0_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_q5_1_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_q8_0_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_q2_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_q3_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_q4_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_q5_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mv_q6_K_f32);
+ if ([ctx->device supportsFamily:MTLGPUFamilyApple7]) {
+ GGML_METAL_DEL_KERNEL(mul_mm_f32_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_f16_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_q4_0_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_q4_1_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_q5_0_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_q5_1_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_q8_0_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_q2_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_q3_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_q4_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_q5_K_f32);
+ GGML_METAL_DEL_KERNEL(mul_mm_q6_K_f32);
+ }
+ GGML_METAL_DEL_KERNEL(rope_f32);
+ GGML_METAL_DEL_KERNEL(rope_f16);
GGML_METAL_DEL_KERNEL(alibi_f32);
GGML_METAL_DEL_KERNEL(cpy_f32_f16);
GGML_METAL_DEL_KERNEL(cpy_f32_f32);
GGML_METAL_DEL_KERNEL(cpy_f16_f16);
+ GGML_METAL_DEL_KERNEL(concat);
+ GGML_METAL_DEL_KERNEL(sqr);
#undef GGML_METAL_DEL_KERNEL
void * data = NULL;
const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n);
if (result != 0) {
- metal_printf("%s: error: posix_memalign failed\n", __func__);
+ GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__);
return NULL;
}
// Metal buffer based on the host memory pointer
//
static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_metal_context * ctx, struct ggml_tensor * t, size_t * offs) {
- //metal_printf("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);
+ //GGML_METAL_LOG_INFO("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);
const int64_t tsize = ggml_nbytes(t);
for (int i = 0; i < ctx->n_buffers; ++i) {
const int64_t ioffs = (int64_t) t->data - (int64_t) ctx->buffers[i].data;
- //metal_printf("ioffs = %10ld, tsize = %10ld, sum = %10ld, ctx->buffers[%d].size = %10ld, name = %s\n", ioffs, tsize, ioffs + tsize, i, ctx->buffers[i].size, ctx->buffers[i].name);
+ //GGML_METAL_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, ctx->buffers[%d].size = %10ld, name = %s\n", ioffs, tsize, ioffs + tsize, i, ctx->buffers[i].size, ctx->buffers[i].name);
if (ioffs >= 0 && ioffs + tsize <= (int64_t) ctx->buffers[i].size) {
*offs = (size_t) ioffs;
- //metal_printf("%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs);
+ //GGML_METAL_LOG_INFO("%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs);
return ctx->buffers[i].metal;
}
}
- metal_printf("%s: error: buffer is nil\n", __func__);
+ GGML_METAL_LOG_ERROR("%s: error: buffer is nil\n", __func__);
return nil;
}
size_t size,
size_t max_size) {
if (ctx->n_buffers >= GGML_METAL_MAX_BUFFERS) {
- metal_printf("%s: too many buffers\n", __func__);
+ GGML_METAL_LOG_ERROR("%s: error: too many buffers\n", __func__);
return false;
}
const int64_t ioffs = (int64_t) data - (int64_t) ctx->buffers[i].data;
if (ioffs >= 0 && ioffs < (int64_t) ctx->buffers[i].size) {
- metal_printf("%s: error: buffer '%s' overlaps with '%s'\n", __func__, name, ctx->buffers[i].name);
+ GGML_METAL_LOG_ERROR("%s: error: buffer '%s' overlaps with '%s'\n", __func__, name, ctx->buffers[i].name);
return false;
}
}
ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];
if (ctx->buffers[ctx->n_buffers].metal == nil) {
- metal_printf("%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0);
+ GGML_METAL_LOG_ERROR("%s: error: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0);
return false;
}
- metal_printf("%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0);
+ GGML_METAL_LOG_INFO("%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0);
++ctx->n_buffers;
} else {
ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];
if (ctx->buffers[ctx->n_buffers].metal == nil) {
- metal_printf("%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0);
+ GGML_METAL_LOG_ERROR("%s: error: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0);
return false;
}
- metal_printf("%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i);
+ GGML_METAL_LOG_INFO("%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i);
if (i + size_step < size) {
- metal_printf("\n");
+ GGML_METAL_LOG_INFO("\n");
}
++ctx->n_buffers;
}
#if TARGET_OS_OSX
- metal_printf(", (%8.2f / %8.2f)",
+ GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)",
ctx->device.currentAllocatedSize / 1024.0 / 1024.0,
ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
if (ctx->device.currentAllocatedSize > ctx->device.recommendedMaxWorkingSetSize) {
- metal_printf(", warning: current allocated size is greater than the recommended max working set size\n");
+ GGML_METAL_LOG_WARN(", warning: current allocated size is greater than the recommended max working set size\n", __func__);
} else {
- metal_printf("\n");
+ GGML_METAL_LOG_INFO("\n");
}
#else
- metal_printf(", (%8.2f)\n", ctx->device.currentAllocatedSize / 1024.0 / 1024.0);
+ GGML_METAL_LOG_INFO(", (%8.2f)\n", ctx->device.currentAllocatedSize / 1024.0 / 1024.0);
#endif
}
}
if (ctx->concur_list_len > GGML_MAX_CONCUR) {
- metal_printf("%s: too many elements for metal ctx->concur_list!\n", __func__);
+ GGML_METAL_LOG_WARN("%s: too many elements for metal ctx->concur_list!\n", __func__);
}
}
continue;
}
- //metal_printf("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op));
+ //GGML_METAL_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op));
struct ggml_tensor * src0 = gf->nodes[i]->src[0];
struct ggml_tensor * src1 = gf->nodes[i]->src[1];
struct ggml_tensor * dst = gf->nodes[i];
+ switch (dst->op) {
+ case GGML_OP_NONE:
+ case GGML_OP_RESHAPE:
+ case GGML_OP_VIEW:
+ case GGML_OP_TRANSPOSE:
+ case GGML_OP_PERMUTE:
+ {
+ // noop -> next node
+ } continue;
+ default:
+ {
+ } break;
+ }
+
const int64_t ne00 = src0 ? src0->ne[0] : 0;
const int64_t ne01 = src0 ? src0->ne[1] : 0;
const int64_t ne02 = src0 ? src0->ne[2] : 0;
id<MTLBuffer> id_src1 = src1 ? ggml_metal_get_buffer(ctx, src1, &offs_src1) : nil;
id<MTLBuffer> id_dst = dst ? ggml_metal_get_buffer(ctx, dst, &offs_dst) : nil;
- //metal_printf("%s: op - %s\n", __func__, ggml_op_name(dst->op));
+ //GGML_METAL_LOG_INFO("%s: op - %s\n", __func__, ggml_op_name(dst->op));
//if (src0) {
- // metal_printf("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02,
+ // GGML_METAL_LOG_INFO("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02,
// ggml_is_contiguous(src0), src0->name);
//}
//if (src1) {
- // metal_printf("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12,
+ // GGML_METAL_LOG_INFO("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12,
// ggml_is_contiguous(src1), src1->name);
//}
//if (dst) {
- // metal_printf("%s: dst - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2,
+ // GGML_METAL_LOG_INFO("%s: dst - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2,
// dst->name);
//}
switch (dst->op) {
- case GGML_OP_NONE:
- case GGML_OP_RESHAPE:
- case GGML_OP_VIEW:
- case GGML_OP_TRANSPOSE:
- case GGML_OP_PERMUTE:
+ case GGML_OP_CONCAT:
{
- // noop
+ const int64_t nb = ne00;
+
+ [encoder setComputePipelineState:ctx->pipeline_concat];
+ [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+ [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
+ [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
+ [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
+ [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
+ [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
+ [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
+ [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
+ [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8];
+ [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9];
+ [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10];
+ [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
+ [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
+ [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
+ [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
+ [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
+ [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
+ [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
+ [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
+ [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:19];
+ [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:20];
+ [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:21];
+ [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:22];
+ [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:23];
+ [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:24];
+ [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:25];
+ [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:26];
+ [encoder setBytes:&nb length:sizeof(nb) atIndex:27];
+
+ const int nth = MIN(1024, ne0);
+
+ [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_ADD:
{
GGML_ASSERT(ggml_is_contiguous(src0));
GGML_ASSERT(ggml_is_contiguous(src1));
- // utilize float4
- GGML_ASSERT(ne00 % 4 == 0);
- const int64_t nb = ne00/4;
-            if (ggml_nelements(src1) == ne10) {
+            bool bcast_row = false;
+
+            int64_t nb = ne00;
+
+            if (ggml_nelements(src1) == ne10 && ne00 % 4 == 0) {
// src1 is a row
GGML_ASSERT(ne11 == 1);
+
+ nb = ne00 / 4;
[encoder setComputePipelineState:ctx->pipeline_add_row];
+
+ bcast_row = true;
} else {
[encoder setComputePipelineState:ctx->pipeline_add];
}
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
[encoder setBuffer:id_dst offset:offs_dst atIndex:2];
- [encoder setBytes:&nb length:sizeof(nb) atIndex:3];
-
- const int64_t n = ggml_nelements(dst)/4;
+ [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
+ [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
+ [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
+ [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
+ [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
+ [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8];
+ [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9];
+ [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10];
+ [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
+ [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
+ [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
+ [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
+ [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
+ [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
+ [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
+ [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
+ [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:19];
+ [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:20];
+ [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:21];
+ [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:22];
+ [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:23];
+ [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:24];
+ [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:25];
+ [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:26];
+ [encoder setBytes:&nb length:sizeof(nb) atIndex:27];
+
-            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+            if (bcast_row) {
+                const int64_t n = ggml_nelements(dst)/4;
+
+                [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+            } else {
+                const int nth = MIN(1024, ne0);
+
+                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+            }
} break;
case GGML_OP_MUL:
{
const float scale = *(const float *) src1->data;
- [encoder setComputePipelineState:ctx->pipeline_scale];
+ int64_t n = ggml_nelements(dst);
+
+ if (n % 4 == 0) {
+ n /= 4;
+ [encoder setComputePipelineState:ctx->pipeline_scale_4];
+ } else {
+ [encoder setComputePipelineState:ctx->pipeline_scale];
+ }
+
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setBytes:&scale length:sizeof(scale) atIndex:2];
- const int64_t n = ggml_nelements(dst)/4;
-
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
} break;
case GGML_OP_UNARY:
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
- const int64_t n = ggml_nelements(dst)/4;
+ const int64_t n = ggml_nelements(dst);
+ GGML_ASSERT(n % 4 == 0);
- [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+ [encoder dispatchThreadgroups:MTLSizeMake(n/4, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
} break;
case GGML_UNARY_OP_RELU:
{
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
- const int64_t n = ggml_nelements(dst)/4;
+ const int64_t n = ggml_nelements(dst);
+ GGML_ASSERT(n % 4 == 0);
- [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+ [encoder dispatchThreadgroups:MTLSizeMake(n/4, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
} break;
default:
{
- metal_printf("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
+ GGML_METAL_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
GGML_ASSERT(false);
}
} break;
+ case GGML_OP_SQR:
+ {
+ GGML_ASSERT(ggml_is_contiguous(src0));
+
+ [encoder setComputePipelineState:ctx->pipeline_sqr];
+ [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+ [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
+
+ const int64_t n = ggml_nelements(dst);
+ [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
+ } break;
case GGML_OP_SOFT_MAX:
{
- const int nth = 32;
+ int nth = 32; // SIMD width
if (ne00%4 == 0) {
[encoder setComputePipelineState:ctx->pipeline_soft_max_4];
} else {
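+                // pick the largest power of two for nth that still fits the row,
+                // clamped to [32, 1024] (the do/while overshoots once, then halves)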
+ do {
+ nth *= 2;
+ } while (nth <= ne00 && nth <= 1024);
+ nth /= 2;
[encoder setComputePipelineState:ctx->pipeline_soft_max];
}
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
[encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
[encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
+ [encoder setThreadgroupMemoryLength:nth/32*sizeof(float) atIndex:0];
- [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+ [encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_DIAG_MASK_INF:
{
} break;
case GGML_OP_MUL_MAT:
{
- // TODO: needs to be updated after PR: https://github.com/ggerganov/ggml/pull/224
-
GGML_ASSERT(ne00 == ne10);
- // GGML_ASSERT(ne02 == ne12); // Should be checked on individual data types until broadcast is implemented everywhere
- uint gqa = ne12/ne02;
GGML_ASSERT(ne03 == ne13);
+ const uint gqa = ne12/ne02;
+
+ // find the break-even point where the matrix-matrix kernel becomes more efficient compared
+ // to the matrix-vector kernel
+ int ne11_mm_min = 1;
+
+#if 0
+ // the numbers below are measured on M2 Ultra for 7B and 13B models
+ // these numbers do not translate to other devices or model sizes
+ // TODO: need to find a better approach
+ if ([ctx->device.name isEqualToString:@"Apple M2 Ultra"]) {
+ switch (src0t) {
+ case GGML_TYPE_F16: ne11_mm_min = 2; break;
+ case GGML_TYPE_Q8_0: ne11_mm_min = 7; break;
+ case GGML_TYPE_Q2_K: ne11_mm_min = 15; break;
+ case GGML_TYPE_Q3_K: ne11_mm_min = 7; break;
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1: ne11_mm_min = 15; break;
+ case GGML_TYPE_Q4_K: ne11_mm_min = 11; break;
+ case GGML_TYPE_Q5_0: // not tested yet
+ case GGML_TYPE_Q5_1: ne11_mm_min = 13; break; // not tested yet
+ case GGML_TYPE_Q5_K: ne11_mm_min = 7; break;
+ case GGML_TYPE_Q6_K: ne11_mm_min = 7; break;
+ default: ne11_mm_min = 1; break;
+ }
+ }
+#endif
+
// for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
// AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
- if (!ggml_is_transposed(src0) &&
+ if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
+ !ggml_is_transposed(src0) &&
!ggml_is_transposed(src1) &&
src1t == GGML_TYPE_F32 &&
- [ctx->device supportsFamily:MTLGPUFamilyApple7] &&
- ne00%32 == 0 &&
- ne11 > 1) {
+ ne00 % 32 == 0 && ne00 >= 64 &&
+ ne11 > ne11_mm_min) {
+ //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
switch (src0->type) {
case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f32_f32]; break;
case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f16_f32]; break;
case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_0_f32]; break;
case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_1_f32]; break;
+ case GGML_TYPE_Q5_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q5_0_f32]; break;
+ case GGML_TYPE_Q5_1: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q5_1_f32]; break;
case GGML_TYPE_Q8_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q8_0_f32]; break;
case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q2_K_f32]; break;
case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q3_K_f32]; break;
[encoder setBytes:&ne1 length:sizeof(ne1) atIndex:12];
[encoder setBytes:&gqa length:sizeof(gqa) atIndex:13];
[encoder setThreadgroupMemoryLength:8192 atIndex:0];
- [encoder dispatchThreadgroups:MTLSizeMake( (ne11+31)/32, (ne01+63) / 64, ne12) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
+ [encoder dispatchThreadgroups:MTLSizeMake( (ne11 + 31)/32, (ne01 + 63)/64, ne12) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
} else {
int nth0 = 32;
int nth1 = 1;
int nrows = 1;
+ //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
// use custom matrix x vector kernel
switch (src0t) {
case GGML_TYPE_F32:
{
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_f32_f32];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_f32_f32];
nrows = 4;
} break;
case GGML_TYPE_F16:
nth0 = 32;
nth1 = 1;
if (ne11 * ne12 < 4) {
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32_1row];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32_1row];
} else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) {
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32_l4];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32_l4];
nrows = ne11;
} else {
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32];
nrows = 4;
}
} break;
nth0 = 8;
nth1 = 8;
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_0_f32];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_q4_0_f32];
} break;
case GGML_TYPE_Q4_1:
{
nth0 = 8;
nth1 = 8;
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_1_f32];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_q4_1_f32];
+ } break;
+ case GGML_TYPE_Q5_0:
+ {
+ GGML_ASSERT(ne02 == 1);
+ GGML_ASSERT(ne12 == 1);
+
+ nth0 = 8;
+ nth1 = 8;
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_q5_0_f32];
+ } break;
+ case GGML_TYPE_Q5_1:
+ {
+ GGML_ASSERT(ne02 == 1);
+ GGML_ASSERT(ne12 == 1);
+
+ nth0 = 8;
+ nth1 = 8;
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_q5_1_f32];
} break;
case GGML_TYPE_Q8_0:
{
nth0 = 8;
nth1 = 8;
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_q8_0_f32];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_q8_0_f32];
} break;
case GGML_TYPE_Q2_K:
{
nth0 = 2;
nth1 = 32;
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_q2_K_f32];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_q2_K_f32];
} break;
case GGML_TYPE_Q3_K:
{
nth0 = 2;
nth1 = 32;
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_q3_K_f32];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_q3_K_f32];
} break;
case GGML_TYPE_Q4_K:
{
nth0 = 4; //1;
nth1 = 8; //32;
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_K_f32];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_q4_K_f32];
} break;
case GGML_TYPE_Q5_K:
{
nth0 = 2;
nth1 = 32;
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_q5_K_f32];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_q5_K_f32];
} break;
case GGML_TYPE_Q6_K:
{
nth0 = 2;
nth1 = 32;
- [encoder setComputePipelineState:ctx->pipeline_mul_mat_q6_K_f32];
+ [encoder setComputePipelineState:ctx->pipeline_mul_mv_q6_K_f32];
} break;
default:
{
- metal_printf("Asserting on type %d\n",(int)src0t);
+ GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src0t);
GGML_ASSERT(false && "not implemented");
}
};
[encoder setBytes:&ne1 length:sizeof(ne1) atIndex:16];
[encoder setBytes:&gqa length:sizeof(gqa) atIndex:17];
- if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q8_0 ||
- src0t == GGML_TYPE_Q2_K) {// || src0t == GGML_TYPE_Q4_K) {
+ if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 ||
+ src0t == GGML_TYPE_Q5_0 || src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 ||
+ src0t == GGML_TYPE_Q2_K) { // || src0t == GGML_TYPE_Q4_K) {
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
else if (src0t == GGML_TYPE_Q4_K) {
case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_get_rows_f16]; break;
case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_0]; break;
case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_1]; break;
+ case GGML_TYPE_Q5_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q5_0]; break;
+ case GGML_TYPE_Q5_1: [encoder setComputePipelineState:ctx->pipeline_get_rows_q5_1]; break;
case GGML_TYPE_Q8_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q8_0]; break;
case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q2_K]; break;
case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q3_K]; break;
} break;
case GGML_OP_RMS_NORM:
{
+ GGML_ASSERT(ne00 % 4 == 0);
+
float eps;
memcpy(&eps, dst->op_params, sizeof(float));
- const int nth = 512;
+ const int nth = MIN(512, ne00);
[encoder setComputePipelineState:ctx->pipeline_rms_norm];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
float eps;
memcpy(&eps, dst->op_params, sizeof(float));
- const int nth = 256;
+ const int nth = MIN(256, ne00);
[encoder setComputePipelineState:ctx->pipeline_norm];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
{
GGML_ASSERT((src0t == GGML_TYPE_F32));
- const int n_past = ((int32_t *) dst->op_params)[0]; UNUSED(n_past);
+ const int nth = MIN(1024, ne00);
+
+ //const int n_past = ((int32_t *) dst->op_params)[0];
const int n_head = ((int32_t *) dst->op_params)[1];
float max_bias;
memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
- if (__builtin_popcount(n_head) != 1) {
- GGML_ASSERT(false && "only power-of-two n_head implemented");
- }
-
const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
+ const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
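+            // ALiBi slopes: the first n_heads_log2_floor heads use powers of m0,
+            // any remaining heads use a second sequence based on m1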
[encoder setComputePipelineState:ctx->pipeline_alibi_f32];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
[encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
[encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
- [encoder setBytes:&m0 length:sizeof( float) atIndex:18];
-
- const int nth = 32;
+ [encoder setBytes:&m0 length:sizeof( float) atIndex:18];
+ [encoder setBytes:&m1 length:sizeof( float) atIndex:19];
+ [encoder setBytes:&n_heads_log2_floor length:sizeof(int) atIndex:20];
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_ROPE:
{
- const int n_past = ((int32_t *) dst->op_params)[0];
- const int n_dims = ((int32_t *) dst->op_params)[1];
- const int mode = ((int32_t *) dst->op_params)[2];
+ GGML_ASSERT(ne10 == ne02);
- float freq_base;
- float freq_scale;
- memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float));
- memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
+ const int nth = MIN(1024, ne00);
- [encoder setComputePipelineState:ctx->pipeline_rope];
- [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
- [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
- [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
- [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
- [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
- [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
- [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
- [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
- [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
- [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
- [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
- [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
- [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
- [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
- [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
- [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
- [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
- [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
- [encoder setBytes:&n_past length:sizeof( int) atIndex:18];
- [encoder setBytes:&n_dims length:sizeof( int) atIndex:19];
- [encoder setBytes:&mode length:sizeof( int) atIndex:20];
- [encoder setBytes:&freq_base length:sizeof(float) atIndex:21];
- [encoder setBytes:&freq_scale length:sizeof(float) atIndex:22];
+ const int n_past = ((int32_t *) dst->op_params)[0];
+ const int n_dims = ((int32_t *) dst->op_params)[1];
+ const int mode = ((int32_t *) dst->op_params)[2];
+ const int n_orig_ctx = ((int32_t *) dst->op_params)[3];
+
+ float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
+ memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
+ memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
+ memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
+ memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
+ memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
+ memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
+
+ switch (src0->type) {
+ case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_rope_f32]; break;
+ case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_rope_f16]; break;
+ default: GGML_ASSERT(false);
+ };
+
+ [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+ [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
+ [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
+ [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3];
+ [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:4];
+ [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:5];
+ [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:6];
+ [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:7];
+ [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:8];
+ [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:9];
+ [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:10];
+ [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:11];
+ [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:12];
+ [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:13];
+ [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:14];
+ [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:15];
+ [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:16];
+ [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:17];
+ [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:18];
+ [encoder setBytes:&n_past length:sizeof( int) atIndex:19];
+ [encoder setBytes:&n_dims length:sizeof( int) atIndex:20];
+ [encoder setBytes:&mode length:sizeof( int) atIndex:21];
+ [encoder setBytes:&n_orig_ctx length:sizeof( int) atIndex:22];
+ [encoder setBytes:&freq_base length:sizeof( float) atIndex:23];
+ [encoder setBytes:&freq_scale length:sizeof( float) atIndex:24];
+ [encoder setBytes:&ext_factor length:sizeof( float) atIndex:25];
+ [encoder setBytes:&attn_factor length:sizeof( float) atIndex:26];
+ [encoder setBytes:&beta_fast length:sizeof( float) atIndex:27];
+ [encoder setBytes:&beta_slow length:sizeof( float) atIndex:28];
- [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(32, 1, 1)];
+ [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_DUP:
case GGML_OP_CPY:
case GGML_OP_CONT:
{
- const int nth = 32;
+ const int nth = MIN(1024, ne00);
switch (src0t) {
case GGML_TYPE_F32:
} break;
default:
{
- metal_printf("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
+ GGML_METAL_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
GGML_ASSERT(false);
}
}
MTLCommandBufferStatus status = (MTLCommandBufferStatus) [ctx->command_buffers[i] status];
if (status != MTLCommandBufferStatusCompleted) {
- metal_printf("%s: command buffer %d failed with status %lu\n", __func__, i, status);
+ GGML_METAL_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status);
GGML_ASSERT(false);
}
}
}
}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// backend interface
+
+static const char * ggml_backend_metal_name(ggml_backend_t backend) {
+ return "Metal";
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_metal_free(ggml_backend_t backend) {
+ struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
+ ggml_metal_free(ctx);
+ free(backend);
+}
+
+static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) {
+ return (void *)buffer->context;
+}
+
+static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+ free(buffer->context);
+ UNUSED(buffer);
+}
+
+static struct ggml_backend_buffer_i metal_backend_buffer_i = {
+ /* .free_buffer = */ ggml_backend_metal_buffer_free_buffer,
+ /* .get_base = */ ggml_backend_metal_buffer_get_base,
+ /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
+ /* .init_tensor = */ NULL, // no initialization required
+ /* .free_tensor = */ NULL, // no cleanup required
+};
+
+static ggml_backend_buffer_t ggml_backend_metal_alloc_buffer(ggml_backend_t backend, size_t size) {
+ struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
+
+ void * data = ggml_metal_host_malloc(size);
+
+ // TODO: set proper name of the buffers
+ ggml_metal_add_buffer(ctx, "backend", data, size, 0);
+
+ return ggml_backend_buffer_init(backend, metal_backend_buffer_i, data, size);
+}
+
+static size_t ggml_backend_metal_get_alignment(ggml_backend_t backend) {
+ return 32;
+ UNUSED(backend);
+}
+
+static void ggml_backend_metal_set_tensor_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+
+ memcpy((char *)tensor->data + offset, data, size);
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_metal_get_tensor_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");
+ GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
+
+ memcpy(data, (const char *)tensor->data + offset, size);
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_metal_synchronize(ggml_backend_t backend) {
+ UNUSED(backend);
+}
+
+static void ggml_backend_metal_cpy_tensor_from(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) {
+ ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_metal_cpy_tensor_to(ggml_backend_t backend, struct ggml_tensor * src, struct ggml_tensor * dst) {
+ ggml_backend_tensor_set_async(dst, src->data, 0, ggml_nbytes(src));
+
+ UNUSED(backend);
+}
+
+static void ggml_backend_metal_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context;
+
+ ggml_metal_graph_compute(metal_ctx, cgraph);
+}
+
+static bool ggml_backend_metal_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
+ return true;
+ UNUSED(backend);
+ UNUSED(op);
+}
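+
+// note: this optimistically reports support for every op; an op the Metal graph
+// evaluator cannot handle will instead assert in the default case of
+// ggml_metal_graph_compute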
+
+static struct ggml_backend_i metal_backend_i = {
+ /* .get_name = */ ggml_backend_metal_name,
+ /* .free = */ ggml_backend_metal_free,
+ /* .alloc_buffer = */ ggml_backend_metal_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_metal_get_alignment,
+ /* .set_tensor_async = */ ggml_backend_metal_set_tensor_async,
+ /* .get_tensor_async = */ ggml_backend_metal_get_tensor_async,
+ /* .synchronize = */ ggml_backend_metal_synchronize,
+ /* .cpy_tensor_from = */ ggml_backend_metal_cpy_tensor_from,
+ /* .cpy_tensor_to = */ ggml_backend_metal_cpy_tensor_to,
+ /* .graph_plan_create = */ NULL, // the metal implementation does not require creating graph plans atm
+ /* .graph_plan_free = */ NULL,
+ /* .graph_plan_compute = */ NULL,
+ /* .graph_compute = */ ggml_backend_metal_graph_compute,
+ /* .supports_op = */ ggml_backend_metal_supports_op,
+};
+
+ggml_backend_t ggml_backend_metal_init(void) {
+    // ggml_metal_init allocates the context itself - allocating it here as well would leak
+    struct ggml_metal_context * ctx = ggml_metal_init(GGML_DEFAULT_N_THREADS);
+
+    if (ctx == NULL) {
+        return NULL;
+    }
+
+ ggml_backend_t metal_backend = malloc(sizeof(struct ggml_backend));
+
+ *metal_backend = (struct ggml_backend) {
+        /* .iface     = */ metal_backend_i,
+ /* .context = */ ctx,
+ };
+
+ return metal_backend;
+}
+
+bool ggml_backend_is_metal(ggml_backend_t backend) {
+ return backend->iface.get_name == ggml_backend_metal_name;
+}
+
+void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
+ struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
+
+ ggml_metal_set_n_cb(ctx, n_cb);
+}
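+
+// usage sketch (illustrative, not part of this change): driving the Metal
+// backend through the generic ggml-backend interface; ggml_backend_free and
+// ggml_backend_graph_compute are assumed from ggml-backend.h
+//
+//   ggml_backend_t backend = ggml_backend_metal_init();
+//   if (backend != NULL && ggml_backend_is_metal(backend)) {
+//       ggml_backend_metal_set_n_cb(backend, 2);
+//   }
+//   // ... build a graph gf, then: ggml_backend_graph_compute(backend, gf); ...
+//   ggml_backend_free(backend); // calls ggml_backend_metal_free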
#define QK4_1 32
typedef struct {
- half d; // delta
- half m; // min
+ half d; // delta
+ half m; // min
uint8_t qs[QK4_1 / 2]; // nibbles / quants
} block_q4_1;
+#define QK5_0 32
+typedef struct {
+ half d; // delta
+ uint8_t qh[4]; // 5-th bit of quants
+ uint8_t qs[QK5_0 / 2]; // nibbles / quants
+} block_q5_0;
+
+#define QK5_1 32
+typedef struct {
+ half d; // delta
+ half m; // min
+ uint8_t qh[4]; // 5-th bit of quants
+ uint8_t qs[QK5_1 / 2]; // nibbles / quants
+} block_q5_1;
+
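+// for reference, each 32-weight block above occupies:
+//   sizeof(block_q5_0) = 2 (d) + 4 (qh) + 16 (qs) = 22 bytes
+//   sizeof(block_q5_1) = 2 (d) + 2 (m) + 4 (qh) + 16 (qs) = 24 bytes
+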
#define QK8_0 32
typedef struct {
half d; // delta
int8_t qs[QK8_0]; // quants
} block_q8_0;
+// general-purpose kernel for addition of two tensors
+// pros: works for non-contiguous tensors, supports broadcast across dims 1, 2 and 3
+// cons: not very efficient
kernel void kernel_add(
- device const float4 * src0,
- device const float4 * src1,
- device float4 * dst,
- uint tpig[[thread_position_in_grid]]) {
- dst[tpig] = src0[tpig] + src1[tpig];
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ constant int64_t & ne00,
+ constant int64_t & ne01,
+ constant int64_t & ne02,
+ constant int64_t & ne03,
+ constant int64_t & nb00,
+ constant int64_t & nb01,
+ constant int64_t & nb02,
+ constant int64_t & nb03,
+ constant int64_t & ne10,
+ constant int64_t & ne11,
+ constant int64_t & ne12,
+ constant int64_t & ne13,
+ constant int64_t & nb10,
+ constant int64_t & nb11,
+ constant int64_t & nb12,
+ constant int64_t & nb13,
+ constant int64_t & ne0,
+ constant int64_t & ne1,
+ constant int64_t & ne2,
+ constant int64_t & ne3,
+ constant int64_t & nb0,
+ constant int64_t & nb1,
+ constant int64_t & nb2,
+ constant int64_t & nb3,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+ const int64_t i03 = tgpig.z;
+ const int64_t i02 = tgpig.y;
+ const int64_t i01 = tgpig.x;
+
+ const int64_t i13 = i03 % ne13;
+ const int64_t i12 = i02 % ne12;
+ const int64_t i11 = i01 % ne11;
+
+ device const char * src0_ptr = src0 + i03*nb03 + i02*nb02 + i01*nb01 + tpitg.x*nb00;
+ device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11 + tpitg.x*nb10;
+ device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1 + tpitg.x*nb0;
+
+ for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
+ ((device float *)dst_ptr)[0] = ((device float *)src0_ptr)[0] + ((device float *)src1_ptr)[0];
+
+ src0_ptr += ntg.x*nb00;
+ src1_ptr += ntg.x*nb10;
+ dst_ptr += ntg.x*nb0;
+ }
}
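
// broadcast example (illustrative): adding a src1 of shape [ne10, 1, 1, 1] to a
// src0 of shape [ne00, ne01, ne02, ne03] makes i11 = i01 % ne11 == 0 above, so
// the single src1 row is reused for every row of src0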
// assumption: src1 is a row
device const float4 * src0,
device const float4 * src1,
device float4 * dst,
- constant int64_t & nb,
+ constant int64_t & nb [[buffer(27)]],
uint tpig[[thread_position_in_grid]]) {
dst[tpig] = src0[tpig] + src1[tpig % nb];
}
}
kernel void kernel_scale(
+ device const float * src0,
+ device float * dst,
+ constant float & scale,
+ uint tpig[[thread_position_in_grid]]) {
+ dst[tpig] = src0[tpig] * scale;
+}
+
+kernel void kernel_scale_4(
device const float4 * src0,
device float4 * dst,
- constant float & scale,
+ constant float & scale,
uint tpig[[thread_position_in_grid]]) {
dst[tpig] = src0[tpig] * scale;
}
dst[tpig] = max(0.0f, src0[tpig]);
}
+kernel void kernel_sqr(
+ device const float * src0,
+ device float * dst,
+ uint tpig[[thread_position_in_grid]]) {
+ dst[tpig] = src0[tpig] * src0[tpig];
+}
+
constant float GELU_COEF_A = 0.044715f;
constant float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
- uint3 tgpig[[threadgroup_position_in_grid]],
- uint3 tpitg[[thread_position_in_threadgroup]],
- uint3 ntg[[threads_per_threadgroup]]) {
- const int64_t i03 = tgpig[2];
- const int64_t i02 = tgpig[1];
- const int64_t i01 = tgpig[0];
+ threadgroup float * buf [[threadgroup(0)]],
+ uint tgpig[[threadgroup_position_in_grid]],
+ uint tpitg[[thread_position_in_threadgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint ntg[[threads_per_threadgroup]]) {
+ const int64_t i03 = (tgpig) / (ne02*ne01);
+ const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01;
+ const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01);
device const float * psrc0 = src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
device float * pdst = dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
// parallel max
- float lmax = psrc0[tpitg[0]];
- for (int i00 = tpitg[0] + ntg[0]; i00 < ne00; i00 += ntg[0]) {
+ float lmax = tpitg < ne00 ? psrc0[tpitg] : -INFINITY;
+
+ for (int i00 = tpitg + ntg; i00 < ne00; i00 += ntg) {
lmax = MAX(lmax, psrc0[i00]);
}
- const float max = simd_max(lmax);
+
+ float max = simd_max(lmax);
+ if (tiisg == 0) {
+ buf[sgitg] = max;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // broadcast, simd group number is ntg / 32
+ for (uint i = ntg / 32 / 2; i > 0; i /= 2) {
+ if (tpitg < i) {
+ buf[tpitg] = MAX(buf[tpitg], buf[tpitg + i]);
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ max = buf[0];
// parallel sum
float lsum = 0.0f;
- for (int i00 = tpitg[0]; i00 < ne00; i00 += ntg[0]) {
+ for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
const float exp_psrc0 = exp(psrc0[i00] - max);
lsum += exp_psrc0;
// Remember the result of exp here. exp is expensive, so we really do not
- // whish to compute it twice.
+ // wish to compute it twice.
pdst[i00] = exp_psrc0;
}
- const float sum = simd_sum(lsum);
+ float sum = simd_sum(lsum);
+ if (tiisg == 0) {
+ buf[sgitg] = sum;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // broadcast, simd group number is ntg / 32
+ for (uint i = ntg / 32 / 2; i > 0; i /= 2) {
+ if (tpitg < i) {
+ buf[tpitg] += buf[tpitg + i];
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ sum = buf[0];
- for (int i00 = tpitg[0]; i00 < ne00; i00 += ntg[0]) {
+ for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
pdst[i00] /= sum;
}
}
constant int64_t & ne00,
constant int64_t & ne01,
constant int64_t & ne02,
- uint3 tgpig[[threadgroup_position_in_grid]],
- uint3 tpitg[[thread_position_in_threadgroup]],
- uint3 ntg[[threads_per_threadgroup]]) {
- const int64_t i03 = tgpig[2];
- const int64_t i02 = tgpig[1];
- const int64_t i01 = tgpig[0];
+ threadgroup float * buf [[threadgroup(0)]],
+ uint tgpig[[threadgroup_position_in_grid]],
+ uint tpitg[[thread_position_in_threadgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint ntg[[threads_per_threadgroup]]) {
+ const int64_t i03 = (tgpig) / (ne02*ne01);
+ const int64_t i02 = (tgpig - i03*ne02*ne01) / ne01;
+ const int64_t i01 = (tgpig - i03*ne02*ne01 - i02*ne01);
device const float4 * psrc4 = (device const float4 *)(src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);
device float4 * pdst4 = (device float4 *)(dst + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);
// parallel max
- float4 lmax4 = psrc4[tpitg[0]];
- for (int i00 = tpitg[0] + ntg[0]; i00 < ne00/4; i00 += ntg[0]) {
+ float4 lmax4 = tpitg < ne00/4 ? psrc4[tpitg] : -INFINITY;
+
+ for (int i00 = tpitg + ntg; i00 < ne00/4; i00 += ntg) {
lmax4 = fmax(lmax4, psrc4[i00]);
}
- float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3]));
- const float max = simd_max(lmax);
+ const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3]));
+ float max = simd_max(lmax);
+ if (tiisg == 0) {
+ buf[sgitg] = max;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // broadcast, simd group number is ntg / 32
+ for (uint i = ntg / 32 / 2; i > 0; i /= 2) {
+ if (tpitg < i) {
+ buf[tpitg] = MAX(buf[tpitg], buf[tpitg + i]);
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ max = buf[0];
// parallel sum
float4 lsum4 = 0.0f;
- for (int i00 = tpitg[0]; i00 < ne00/4; i00 += ntg[0]) {
+ for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
const float4 exp_psrc4 = exp(psrc4[i00] - max);
lsum4 += exp_psrc4;
pdst4[i00] = exp_psrc4;
}
- float lsum = lsum4[0] + lsum4[1] + lsum4[2] + lsum4[3];
- const float sum = simd_sum(lsum);
+ const float lsum = lsum4[0] + lsum4[1] + lsum4[2] + lsum4[3];
+ float sum = simd_sum(lsum);
+ if (tiisg == 0) {
+ buf[sgitg] = sum;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // broadcast, simd group number is ntg / 32
+ for (uint i = ntg / 32 / 2; i > 0; i /= 2) {
+ if (tpitg < i) {
+ buf[tpitg] += buf[tpitg + i];
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ sum = buf[0];
- for (int i00 = tpitg[0]; i00 < ne00/4; i00 += ntg[0]) {
+ for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
pdst4[i00] /= sum;
}
}
dst[i02*ne01*ne00 + i01*ne00 + i00] = -INFINITY;
} else {
dst[i02*ne01*ne00 + i01*ne00 + i00] = src0[i02*ne01*ne00 + i01*ne00 + i00];
- }
+ }
}
kernel void kernel_diag_mask_inf_8(
uint sgitg[[simdgroup_index_in_threadgroup]],
uint tiisg[[thread_index_in_simdgroup]],
uint ntg[[threads_per_threadgroup]]) {
- device const float4 * x = (device const float4 *) ((device const char *) src0 + tgpig*nb01);
- device const float * x_scalar = (device const float *) x;
- float4 sumf=0;
- float all_sum=0;
+ device const float4 * x = (device const float4 *) ((device const char *) src0 + tgpig*nb01);
+ device const float * x_scalar = (device const float *) x;
+
+ float4 sumf = 0;
+ float all_sum = 0;
// parallel sum
for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
}
threadgroup_barrier(mem_flags::mem_threadgroup);
+
// broadcast, simd group number is ntg / 32
for (uint i = ntg / 32 / 2; i > 0; i /= 2) {
if (tpitg < i) {
}
}
if (tpitg == 0) {
- for (int i = 4 * (ne00 / 4); i < ne00; i++) {sum[0] += x_scalar[i];}
+ for (int i = 4 * (ne00 / 4); i < ne00; i++) {
+ sum[0] += x_scalar[i];
+ }
sum[0] /= ne00;
}
y[i00] = x[i00] * scale;
}
if (tpitg == 0) {
- for (int i00 = 4 * (ne00 / 4); i00 < ne00; i00++) {y_scalar[i00] = x_scalar[i00] * scale;}
+ for (int i00 = 4 * (ne00 / 4); i00 < ne00; i00++) {
+ y_scalar[i00] = x_scalar[i00] * scale;
+ }
}
}
// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
inline float block_q_n_dot_y(device const block_q4_0 * qb_curr, float sumy, thread float * yl, int il) {
float d = qb_curr->d;
+
float2 acc = 0.f;
+
device const uint16_t * qs = ((device const uint16_t *)qb_curr + 1 + il/2);
+
for (int i = 0; i < 8; i+=2) {
acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F)
+ yl[i + 1] * (qs[i / 2] & 0x0F00);
inline float block_q_n_dot_y(device const block_q4_1 * qb_curr, float sumy, thread float * yl, int il) {
float d = qb_curr->d;
float m = qb_curr->m;
- device const uint16_t * qs = ((device const uint16_t *)qb_curr + 2 + il/2);
+
float2 acc = 0.f;
+
+ device const uint16_t * qs = ((device const uint16_t *)qb_curr + 2 + il/2);
+
for (int i = 0; i < 8; i+=2) {
acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F)
+ yl[i + 1] * (qs[i / 2] & 0x0F00);
return d * (acc[0] + acc[1]) + sumy * m;
}
+// function for calculating the inner product between half a q5_0 block and 16 floats (yl); sumy is SUM(yl[i])
+// il indicates where the q5 quants begin (0 or QK5_0/4)
+// we assume that the yl's have been multiplied with the appropriate scale factor
+// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
+inline float block_q_n_dot_y(device const block_q5_0 * qb_curr, float sumy, thread float * yl, int il) {
+ float d = qb_curr->d;
+
+ float2 acc = 0.f;
+
+ device const uint16_t * qs = ((device const uint16_t *)qb_curr + 3 + il/2);
+ const uint32_t qh = *((device const uint32_t *)qb_curr->qh);
+
+ for (int i = 0; i < 8; i+=2) {
+ acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010))
+ + yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000));
+ acc[1] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100))
+ + yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000));
+ }
+ return d * (sumy * -16.f + acc[0] + acc[1]);
+}
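// Illustrative plain-C sketch (not part of this Metal source): an unoptimized
// scalar version of the same half-block dot product, with q5[] holding the
// already-recombined 5-bit quants in 0..31 so no pre-scaling of y is needed.
#include <stdint.h>

static float ref_q5_0_half_dot(float d, const uint8_t * q5, const float * y) {
    float acc = 0.f;
    for (int i = 0; i < 16; ++i) {
        acc += (float)(q5[i] - 16) * y[i];   // q5_0 values are centered at 16
    }
    return d * acc;   // equals d * (sumy * -16 + SUM(q5[i] * y[i])) above
}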
+
+// function to calculate the inner product between half a q5_1 block and 16 floats (yl), sumy is SUM(yl[i])
+// il indicates where the q5 quants begin (0 or QK5_1/4)
+// we assume that the yl's have been multiplied with the appropriate scale factor
+// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
+inline float block_q_n_dot_y(device const block_q5_1 * qb_curr, float sumy, thread float * yl, int il) {
+ float d = qb_curr->d;
+ float m = qb_curr->m;
+
+ float2 acc = 0.f;
+
+ device const uint16_t * qs = ((device const uint16_t *)qb_curr + 4 + il/2);
+ const uint32_t qh = *((device const uint32_t *)qb_curr->qh);
+
+ for (int i = 0; i < 8; i+=2) {
+ acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010))
+ + yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000));
+ acc[1] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_1/2) << 8 ) & 0x00100))
+ + yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_1/2) << 16) & 0x10000));
+ }
+ return d * (acc[0] + acc[1]) + sumy * m;
+}
+
// putting them in the kernel causes a significant performance penalty
-#define N_DST 4 // each SIMD group works on 4 rows
-#define N_SIMDGROUP 2 // number of SIMD groups in a thread group
+#define N_DST 4 // each SIMD group works on 4 rows
+#define N_SIMDGROUP 2 // number of SIMD groups in a thread group
#define N_SIMDWIDTH 32 // assuming SIMD group size is 32
// Note: this is a template, but strictly speaking it only applies to
// quantizations where the block size is 32. It also does not
int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne10, int64_t ne12, int64_t ne0, int64_t ne1, uint gqa,
uint3 tgpig, uint tiisg, uint sgitg) {
const int nb = ne00/QK4_0;
+
const int r0 = tgpig.x;
const int r1 = tgpig.y;
const int im = tgpig.z;
+
const int first_row = (r0 * nsg + sgitg) * nr;
+
const uint offset0 = first_row * nb + im/gqa*(nb*ne0);
+
device const block_q_type * x = (device const block_q_type *) src0 + offset0;
device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
- float yl[16]; // src1 vector cache
- float sumf[nr]={0.f};
- const int ix = tiisg/2;
- const int il = 8*(tiisg%2);
+ float yl[16]; // src1 vector cache
+ float sumf[nr] = {0.f};
+
+ const int ix = (tiisg/2);
+ const int il = (tiisg%2)*8;
device const float * yb = y + ix * QK4_0 + il;
sumy += yb[i] + yb[i+1];
yl[i+0] = yb[i+ 0];
yl[i+1] = yb[i+ 1]/256.f;
+
sumy += yb[i+16] + yb[i+17];
yl[i+8] = yb[i+16]/16.f;
yl[i+9] = yb[i+17]/4096.f;
for (int row = 0; row < nr; ++row) {
const float tot = simd_sum(sumf[row]);
if (tiisg == 0 && first_row + row < ne01) {
- dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot;
+ dst[im*ne0*ne1 + r1*ne0 + first_row + row] = tot;
}
}
}
-kernel void kernel_mul_mat_q4_0_f32(
+kernel void kernel_mul_mv_q4_0_f32(
device const void * src0,
device const float * src1,
device float * dst,
constant int64_t & ne1[[buffer(16)]],
constant uint & gqa[[buffer(17)]],
uint3 tgpig[[threadgroup_position_in_grid]],
- uint tiisg[[thread_index_in_simdgroup]],
- uint sgitg[[simdgroup_index_in_threadgroup]]) {
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]]) {
mul_vec_q_n_f32<block_q4_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,gqa,tgpig,tiisg,sgitg);
}
-kernel void kernel_mul_mat_q4_1_f32(
+kernel void kernel_mul_mv_q4_1_f32(
device const void * src0,
device const float * src1,
device float * dst,
mul_vec_q_n_f32<block_q4_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,gqa,tgpig,tiisg,sgitg);
}
+kernel void kernel_mul_mv_q5_0_f32(
+ device const void * src0,
+ device const float * src1,
+ device float * dst,
+ constant int64_t & ne00,
+ constant int64_t & ne01[[buffer(4)]],
+ constant int64_t & ne02[[buffer(5)]],
+ constant int64_t & ne10[[buffer(9)]],
+ constant int64_t & ne12[[buffer(11)]],
+ constant int64_t & ne0[[buffer(15)]],
+ constant int64_t & ne1[[buffer(16)]],
+ constant uint & gqa[[buffer(17)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]]) {
+ mul_vec_q_n_f32<block_q5_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,gqa,tgpig,tiisg,sgitg);
+}
+
+kernel void kernel_mul_mv_q5_1_f32(
+ device const void * src0,
+ device const float * src1,
+ device float * dst,
+ constant int64_t & ne00,
+ constant int64_t & ne01[[buffer(4)]],
+ constant int64_t & ne02[[buffer(5)]],
+ constant int64_t & ne10[[buffer(9)]],
+ constant int64_t & ne12[[buffer(11)]],
+ constant int64_t & ne0[[buffer(15)]],
+ constant int64_t & ne1[[buffer(16)]],
+ constant uint & gqa[[buffer(17)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]]) {
+ mul_vec_q_n_f32<block_q5_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,gqa,tgpig,tiisg,sgitg);
+}
+
#define NB_Q8_0 8
-kernel void kernel_mul_mat_q8_0_f32(
+kernel void kernel_mul_mv_q8_0_f32(
device const void * src0,
device const float * src1,
device float * dst,
#define N_F32_F32 4
-kernel void kernel_mul_mat_f32_f32(
+kernel void kernel_mul_mv_f32_f32(
device const char * src0,
device const char * src1,
device float * dst,
}
}
-kernel void kernel_mul_mat_f16_f32_1row(
+kernel void kernel_mul_mv_f16_f32_1row(
device const char * src0,
device const char * src1,
device float * dst,
constant int64_t & ne0,
constant int64_t & ne1,
uint3 tgpig[[threadgroup_position_in_grid]],
- uint tiisg[[thread_index_in_simdgroup]]) {
+ uint tiisg[[thread_index_in_simdgroup]]) {
const int64_t r0 = tgpig.x;
const int64_t r1 = tgpig.y;
#define N_F16_F32 4
-kernel void kernel_mul_mat_f16_f32(
+kernel void kernel_mul_mv_f16_f32(
device const char * src0,
device const char * src1,
device float * dst,
}
// Assumes row size (ne00) is a multiple of 4
-kernel void kernel_mul_mat_f16_f32_l4(
+kernel void kernel_mul_mv_f16_f32_l4(
device const char * src0,
device const char * src1,
device float * dst,
constant uint64_t & nb1,
constant uint64_t & nb2,
constant uint64_t & nb3,
- constant float & m0,
+ constant float & m0,
+ constant float & m1,
+ constant int & n_heads_log2_floor,
uint3 tgpig[[threadgroup_position_in_grid]],
uint3 tpitg[[thread_position_in_threadgroup]],
uint3 ntg[[threads_per_threadgroup]]) {
const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);
device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
- float m_k = pow(m0, i2 + 1);
+ float m_k;
+ if (i2 < n_heads_log2_floor) {
+ m_k = pow(m0, i2 + 1);
+ } else {
+ m_k = pow(m1, 2 * (i2 - n_heads_log2_floor) + 1);
+ }
for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
dst_data[i00] = src[0] + m_k * (i00 - ne00 + 1);
}
}
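// Host-side sketch (an assumption about the caller, mirroring how ggml
// derives the ALiBi slopes: m0 = 2^(-8/k), m1 = 2^(-4/k) with
// k = 2^floor(log2(n_head)) passed in as n_heads_log2_floor):
#include <math.h>

static float ref_alibi_slope(int head, int n_heads_log2_floor, float m0, float m1) {
    // heads below the power-of-two floor use successive powers of m0,
    // the remaining heads use odd powers of m1
    return head < n_heads_log2_floor
        ? powf(m0, (float)(head + 1))
        : powf(m1, (float)(2*(head - n_heads_log2_floor) + 1));
}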
+static float rope_yarn_ramp(const float low, const float high, const int i0) {
+ const float y = (i0 / 2 - low) / max(0.001f, high - low);
+ return 1.0f - min(1.0f, max(0.0f, y));
+}
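// (i.e. a linear ramp that is 1 for i0/2 <= low and 0 for i0/2 >= high,
// used by rope_yarn below to blend interpolated and extrapolated angles)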
+
+// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
+// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
+static void rope_yarn(
+ float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale,
+ thread float * cos_theta, thread float * sin_theta
+) {
+ // Get n-d rotational scaling corrected for extrapolation
+ float theta_interp = freq_scale * theta_extrap;
+ float theta = theta_interp;
+ if (ext_factor != 0.0f) {
+ float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor;
+ theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
+
+ // Get n-d magnitude scaling corrected for interpolation
+ mscale *= 1.0f + 0.1f * log(1.0f / freq_scale);
+ }
+ *cos_theta = cos(theta) * mscale;
+ *sin_theta = sin(theta) * mscale;
+}
+
+// Solving `n_rot = max_pos_emb * base^(-2*x/n_dims) / (2*pi)` for x, we get
+// `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
+static float rope_yarn_corr_factor(int n_dims, int n_orig_ctx, float n_rot, float base) {
+ return n_dims * log(n_orig_ctx / (n_rot * 2 * M_PI_F)) / (2 * log(base));
+}
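// Derivation sketch (added for clarity, with max_pos_emb = n_orig_ctx here):
// dimension pair x rotates by theta_x(p) = p * base^(-2x/n_dims), so over
// max_pos_emb positions it completes
//     n_rot = max_pos_emb * base^(-2x/n_dims) / (2*pi)
// full turns; rearranging,
//     base^(2x/n_dims) = max_pos_emb / (2*pi*n_rot)
//     x = n_dims * log(max_pos_emb / (n_rot * 2*pi)) / (2 * log(base))
// which is exactly the expression returned above.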
+
+static void rope_yarn_corr_dims(
+ int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]
+) {
+ // start and end correction dims
+ dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_fast, freq_base)));
+ dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_orig_ctx, beta_slow, freq_base)));
+}
+
+typedef void (rope_t)(
+ device const void * src0,
+ device const int32_t * src1,
+ device float * dst,
+ constant int64_t & ne00,
+ constant int64_t & ne01,
+ constant int64_t & ne02,
+ constant int64_t & ne03,
+ constant uint64_t & nb00,
+ constant uint64_t & nb01,
+ constant uint64_t & nb02,
+ constant uint64_t & nb03,
+ constant int64_t & ne0,
+ constant int64_t & ne1,
+ constant int64_t & ne2,
+ constant int64_t & ne3,
+ constant uint64_t & nb0,
+ constant uint64_t & nb1,
+ constant uint64_t & nb2,
+ constant uint64_t & nb3,
+ constant int & n_past,
+ constant int & n_dims,
+ constant int & mode,
+ constant int & n_orig_ctx,
+ constant float & freq_base,
+ constant float & freq_scale,
+ constant float & ext_factor,
+ constant float & attn_factor,
+ constant float & beta_fast,
+ constant float & beta_slow,
+ uint tiitg[[thread_index_in_threadgroup]],
+ uint3 tptg[[threads_per_threadgroup]],
+ uint3 tgpig[[threadgroup_position_in_grid]]);
+
+template<typename T>
kernel void kernel_rope(
- device const void * src0,
- device float * dst,
- constant int64_t & ne00,
- constant int64_t & ne01,
- constant int64_t & ne02,
- constant int64_t & ne03,
- constant uint64_t & nb00,
- constant uint64_t & nb01,
- constant uint64_t & nb02,
- constant uint64_t & nb03,
- constant int64_t & ne0,
- constant int64_t & ne1,
- constant int64_t & ne2,
- constant int64_t & ne3,
- constant uint64_t & nb0,
- constant uint64_t & nb1,
- constant uint64_t & nb2,
- constant uint64_t & nb3,
- constant int & n_past,
- constant int & n_dims,
- constant int & mode,
- constant float & freq_base,
- constant float & freq_scale,
+ device const void * src0,
+ device const int32_t * src1,
+ device float * dst,
+ constant int64_t & ne00,
+ constant int64_t & ne01,
+ constant int64_t & ne02,
+ constant int64_t & ne03,
+ constant uint64_t & nb00,
+ constant uint64_t & nb01,
+ constant uint64_t & nb02,
+ constant uint64_t & nb03,
+ constant int64_t & ne0,
+ constant int64_t & ne1,
+ constant int64_t & ne2,
+ constant int64_t & ne3,
+ constant uint64_t & nb0,
+ constant uint64_t & nb1,
+ constant uint64_t & nb2,
+ constant uint64_t & nb3,
+ constant int & n_past,
+ constant int & n_dims,
+ constant int & mode,
+ constant int & n_orig_ctx,
+ constant float & freq_base,
+ constant float & freq_scale,
+ constant float & ext_factor,
+ constant float & attn_factor,
+ constant float & beta_fast,
+ constant float & beta_slow,
uint tiitg[[thread_index_in_threadgroup]],
uint3 tptg[[threads_per_threadgroup]],
uint3 tgpig[[threadgroup_position_in_grid]]) {
const bool is_neox = mode & 2;
- const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
+ float corr_dims[2];
+ rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
- const float theta_0 = freq_scale * (float)p;
+ device const int32_t * pos = src1;
+
+ const int64_t p = pos[i2];
+
+ const float theta_0 = (float)p;
const float inv_ndims = -1.f/n_dims;
if (!is_neox) {
for (int64_t i0 = 2*tiitg; i0 < ne0; i0 += 2*tptg.x) {
const float theta = theta_0 * pow(freq_base, inv_ndims*i0);
- const float cos_theta = cos(theta);
- const float sin_theta = sin(theta);
+ float cos_theta, sin_theta;
+ rope_yarn(theta, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
- device const float * const src = (device float *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
- device float * dst_data = (device float *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+ device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
- const float x0 = src[0];
- const float x1 = src[1];
+ const T x0 = src[0];
+ const T x1 = src[1];
dst_data[0] = x0*cos_theta - x1*sin_theta;
dst_data[1] = x0*sin_theta + x1*cos_theta;
for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
for (int64_t ic = 2*tiitg; ic < n_dims; ic += 2*tptg.x) {
- const float theta = theta_0 * pow(freq_base, inv_ndims*ic - ib);
- const float cos_theta = cos(theta);
- const float sin_theta = sin(theta);
+ // simplified from `(ib * n_dims + ic) * inv_ndims`
+ const float cur_rot = inv_ndims*ic - ib;
+
+ const float theta = theta_0 * pow(freq_base, cur_rot);
+ float cos_theta, sin_theta;
+ rope_yarn(theta, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta);
const int64_t i0 = ib*n_dims + ic/2;
- device const float * const src = (device float *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
- device float * dst_data = (device float *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+ device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
+ device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
const float x0 = src[0];
const float x1 = src[n_dims/2];
}
}
+template [[host_name("kernel_rope_f32")]] kernel rope_t kernel_rope<float>;
+template [[host_name("kernel_rope_f16")]] kernel rope_t kernel_rope<half>;
+
kernel void kernel_cpy_f16_f16(
device const half * src0,
device half * dst,
}
}
+kernel void kernel_concat(
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ constant int64_t & ne00,
+ constant int64_t & ne01,
+ constant int64_t & ne02,
+ constant int64_t & ne03,
+ constant uint64_t & nb00,
+ constant uint64_t & nb01,
+ constant uint64_t & nb02,
+ constant uint64_t & nb03,
+ constant int64_t & ne10,
+ constant int64_t & ne11,
+ constant int64_t & ne12,
+ constant int64_t & ne13,
+ constant uint64_t & nb10,
+ constant uint64_t & nb11,
+ constant uint64_t & nb12,
+ constant uint64_t & nb13,
+ constant int64_t & ne0,
+ constant int64_t & ne1,
+ constant int64_t & ne2,
+ constant int64_t & ne3,
+ constant uint64_t & nb0,
+ constant uint64_t & nb1,
+ constant uint64_t & nb2,
+ constant uint64_t & nb3,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+
+ const int64_t i03 = tgpig.z;
+ const int64_t i02 = tgpig.y;
+ const int64_t i01 = tgpig.x;
+
+ const int64_t i13 = i03 % ne13;
+ const int64_t i12 = i02 % ne12;
+ const int64_t i11 = i01 % ne11;
+
+ device const char * src0_ptr = src0 + i03 * nb03 + i02 * nb02 + i01 * nb01 + tpitg.x*nb00;
+ device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11 + tpitg.x*nb10;
+ device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1 + tpitg.x*nb0;
+
+ for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
+ if (i02 < ne02) {
+ ((device float *)dst_ptr)[0] = ((device float *)src0_ptr)[0];
+ src0_ptr += ntg.x*nb00;
+ } else {
+ ((device float *)dst_ptr)[0] = ((device float *)src1_ptr)[0];
+ src1_ptr += ntg.x*nb10;
+ }
+ dst_ptr += ntg.x*nb0;
+ }
+}
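// Illustrative plain-C sketch (not part of this Metal source) of what
// kernel_concat computes for contiguous float tensors: rows with i2 < ne02
// come from src0, the rest from src1, i.e. concatenation along dimension 2.
#include <stddef.h>

static void ref_concat_dim2(const float * src0, const float * src1, float * dst,
                            int ne0, int ne1, int ne02, int ne2) {
    for (int i2 = 0; i2 < ne2; ++i2) {
        const float * src = i2 < ne02 ? src0 + (size_t) i2        *ne1*ne0
                                      : src1 + (size_t)(i2 - ne02)*ne1*ne0;
        for (int i1 = 0; i1 < ne1; ++i1) {
            for (int i0 = 0; i0 < ne0; ++i0) {
                dst[((size_t)i2*ne1 + i1)*ne0 + i0] = src[(size_t)i1*ne0 + i0];
            }
        }
    }
}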
+
//============================================ k-quants ======================================================
#ifndef QK_K
//====================================== dot products =========================
-kernel void kernel_mul_mat_q2_K_f32(
+kernel void kernel_mul_mv_q2_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
}
#if QK_K == 256
-kernel void kernel_mul_mat_q3_K_f32(
+kernel void kernel_mul_mv_q3_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
float yl[32];
- const uint16_t kmask1 = 0x3030;
- const uint16_t kmask2 = 0x0f0f;
+ //const uint16_t kmask1 = 0x3030;
+ //const uint16_t kmask2 = 0x0f0f;
const int tid = tiisg/4;
const int ix = tiisg%4;
}
}
#else
-kernel void kernel_mul_mat_q3_K_f32(
+kernel void kernel_mul_mv_q3_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
#endif
#if QK_K == 256
-kernel void kernel_mul_mat_q4_K_f32(
+kernel void kernel_mul_mv_q4_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
}
}
#else
-kernel void kernel_mul_mat_q4_K_f32(
+kernel void kernel_mul_mv_q4_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
}
#endif
-kernel void kernel_mul_mat_q5_K_f32(
+kernel void kernel_mul_mv_q5_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
}
-kernel void kernel_mul_mat_q6_K_f32(
+kernel void kernel_mul_mv_q6_K_f32(
device const void * src0,
device const float * src1,
device float * dst,
}
}
+template <typename type4x4>
+void dequantize_q5_0(device const block_q5_0 *xb, short il, thread type4x4 & reg) {
+ device const uint16_t * qs = ((device const uint16_t *)xb + 3);
+ const float d = xb->d;
+ const float md = -16.h * xb->d;
+ const ushort mask = il ? 0x00F0 : 0x000F;
+
+ const uint32_t qh = *((device const uint32_t *)xb->qh);
+
+ const int x_mv = il ? 4 : 0;
+
+ const int gh_mv = il ? 12 : 0;
+ const int gh_bk = il ? 0 : 4;
+
+ for (int i = 0; i < 8; i++) {
+ // extract the 5th bits for x0 and x1
+ const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10;
+ const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10;
+
+ // combine the 4 bits from qs with the 5th bit
+ const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0);
+ const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1);
+
+ reg[i/2][2*(i%2)+0] = d * x0 + md;
+ reg[i/2][2*(i%2)+1] = d * x1 + md;
+ }
+}
+
+template <typename type4x4>
+void dequantize_q5_1(device const block_q5_1 *xb, short il, thread type4x4 & reg) {
+ device const uint16_t * qs = ((device const uint16_t *)xb + 4);
+ const float d = xb->d;
+ const float m = xb->m;
+ const ushort mask = il ? 0x00F0 : 0x000F;
+
+ const uint32_t qh = *((device const uint32_t *)xb->qh);
+
+ const int x_mv = il ? 4 : 0;
+
+ const int gh_mv = il ? 12 : 0;
+ const int gh_bk = il ? 0 : 4;
+
+ for (int i = 0; i < 8; i++) {
+ // extract the 5th bits for x0 and x1
+ const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10;
+ const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10;
+
+ // combine the 4 bits from qs with the 5th bit
+ const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0);
+ const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1);
+
+ reg[i/2][2*(i%2)+0] = d * x0 + m;
+ reg[i/2][2*(i%2)+1] = d * x1 + m;
+ }
+}
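// Block layouts assumed by the two dequantizers above (sketch; d and m are
// 2-byte halves in ggml, shown as uint16_t here), which is why qs is reached
// with uint16_t offsets of 3 and 4 respectively:
#include <stdint.h>

typedef struct { uint16_t d;             uint8_t qh[4]; uint8_t qs[16]; } ref_block_q5_0; // qs at byte 6 = 3 x uint16
typedef struct { uint16_t d; uint16_t m; uint8_t qh[4]; uint8_t qs[16]; } ref_block_q5_1; // qs at byte 8 = 4 x uint16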
+
template <typename type4x4>
void dequantize_q8_0(device const block_q8_0 *xb, short il, thread type4x4 & reg) {
device const int8_t * qs = ((device const int8_t *)xb->qs);
}
#define BLOCK_SIZE_M 64 // 8 simdgroup matrices from matrix A
-#define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix A
+#define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix B
#define BLOCK_SIZE_K 32
#define THREAD_MAT_M 4 // each thread take 4 simdgroup matrices from matrix A
#define THREAD_MAT_N 2 // each thread take 2 simdgroup matrices from matrix B
const uint r0 = tgpig.y;
const uint r1 = tgpig.x;
const uint im = tgpig.z;
+
// if this block is of 64x32 shape or smaller
short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M;
short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N;
+
// a thread shouldn't load data outside of the matrix
short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1;
short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1;
+ nb10 * (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL)));
for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) {
- //load data and store to threadgroup memory
+ // load data and store to threadgroup memory
half4x4 temp_a;
dequantize_func(x, il, temp_a);
threadgroup_barrier(mem_flags::mem_threadgroup);
+
#pragma unroll(16)
for (int i = 0; i < 16; i++) {
*(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \
- + 16 * (tiitg % THREAD_PER_ROW) + 8 * (i / 8)) \
- + (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4];
+ + (tiitg % THREAD_PER_ROW) * 16 + (i / 8) * 8) \
+ + (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4];
}
- *(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) \
- = *((device float2x4 *)y);
+
+ *(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) = *((device float2x4 *)y);
+
il = (il + 2 < nl) ? il + 2 : il % 2;
x = (il < 2) ? x + (2+nl-1)/nl : x;
y += BLOCK_SIZE_K;
threadgroup_barrier(mem_flags::mem_threadgroup);
- //load matrices from threadgroup memory and conduct outer products
+
+ // load matrices from threadgroup memory and conduct outer products
threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2));
threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2));
+
#pragma unroll(4)
for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) {
#pragma unroll(4)
lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE;
lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE;
+
#pragma unroll(8)
for (int i = 0; i < 8; i++){
simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]);
}
if ((r0 + 1) * BLOCK_SIZE_M <= ne0 && (r1 + 1) * BLOCK_SIZE_N <= ne1) {
- device float *C = dst + BLOCK_SIZE_M * r0 + 32 * (sgitg&1) \
- + (BLOCK_SIZE_N * r1 + 16 * (sgitg>>1)) * ne0 + im*ne1*ne0;
+ device float * C = dst + (BLOCK_SIZE_M * r0 + 32 * (sgitg & 1)) \
+ + (BLOCK_SIZE_N * r1 + 16 * (sgitg >> 1)) * ne0 + im*ne1*ne0;
for (int i = 0; i < 8; i++) {
simdgroup_store(c_res[i], C + 8 * (i%4) + 8 * ne0 * (i/4), ne0);
}
} else {
// block is smaller than 64x32, we should avoid writing data outside of the matrix
threadgroup_barrier(mem_flags::mem_threadgroup);
- threadgroup float *temp_str = ((threadgroup float *)shared_memory) \
+ threadgroup float * temp_str = ((threadgroup float *)shared_memory) \
+ 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M;
for (int i = 0; i < 8; i++) {
simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M);
}
threadgroup_barrier(mem_flags::mem_threadgroup);
- device float *C = dst + BLOCK_SIZE_M * r0 + (BLOCK_SIZE_N * r1) * ne0 + im*ne1*ne0;
- if (sgitg==0) {
+
+ device float * C = dst + (BLOCK_SIZE_M * r0) + (BLOCK_SIZE_N * r1) * ne0 + im*ne1*ne0;
+ if (sgitg == 0) {
for (int i = 0; i < n_rows; i++) {
- for (int j = tiitg; j< n_cols; j += BLOCK_SIZE_N) {
+ for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) {
*(C + i + j * ne0) = *(temp_str + i + j * BLOCK_SIZE_M);
}
}
template [[host_name("kernel_get_rows_f16")]] kernel get_rows_t kernel_get_rows<half4x4, 1, dequantize_f16>;
template [[host_name("kernel_get_rows_q4_0")]] kernel get_rows_t kernel_get_rows<block_q4_0, 2, dequantize_q4_0>;
template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_t kernel_get_rows<block_q4_1, 2, dequantize_q4_1>;
+template [[host_name("kernel_get_rows_q5_0")]] kernel get_rows_t kernel_get_rows<block_q5_0, 2, dequantize_q5_0>;
+template [[host_name("kernel_get_rows_q5_1")]] kernel get_rows_t kernel_get_rows<block_q5_1, 2, dequantize_q5_1>;
template [[host_name("kernel_get_rows_q8_0")]] kernel get_rows_t kernel_get_rows<block_q8_0, 2, dequantize_q8_0>;
template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_t kernel_get_rows<block_q2_K, QK_NL, dequantize_q2_K>;
template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_t kernel_get_rows<block_q3_K, QK_NL, dequantize_q3_K>;
template [[host_name("kernel_mul_mm_f16_f32")]] kernel mat_mm_t kernel_mul_mm<half4x4, 1, dequantize_f16>;
template [[host_name("kernel_mul_mm_q4_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_0, 2, dequantize_q4_0>;
template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_1, 2, dequantize_q4_1>;
+template [[host_name("kernel_mul_mm_q5_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_0, 2, dequantize_q5_0>;
+template [[host_name("kernel_mul_mm_q5_1_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_1, 2, dequantize_q5_1>;
template [[host_name("kernel_mul_mm_q8_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q8_0, 2, dequantize_q8_0>;
template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q2_K, QK_NL, dequantize_q2_K>;
template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q3_K, QK_NL, dequantize_q3_K>;
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
-#define CL_DMMV_BLOCK_SIZE 32
+#define CL_DMMV_LOCAL_SIZE 32
#ifndef K_QUANTS_PER_ITERATION
#define K_QUANTS_PER_ITERATION 1
__kernel void dequantize_block_q2_K(__global const struct block_q2_K *x, __global float *yy)
{
- const int i = get_group_id(0);
+ const int i = get_group_id(0) + get_global_offset(0);
const int tid = get_local_id(0);
const int n = tid / 32;
const int l = tid - 32 * n;
const int is = 8 * n + l / 16;
const uint8_t q = x[i].qs[32 * n + l];
- __global float *y = yy + i * QK_K + 128 * n;
+ __global float *y = yy + get_group_id(0) * QK_K + 128 * n;
const float dall = vload_half(0, &x[i].d);
const float dmin = vload_half(0, &x[i].dmin);
__kernel void dequantize_block_q3_K(__global const struct block_q3_K *x, __global float *yy)
{
int r = get_local_id(0) / 4;
- int i = get_group_id(0);
+ int i = get_group_id(0) + get_global_offset(0);
int tid = r / 2;
int is0 = r % 2;
int l0 = 16 * is0 + 4 * (get_local_id(0) % 4);
float d_all = vload_half(0, &x[i].d);
float dl = d_all * (us - 32);
- __global float *y = yy + i * QK_K + 128 * n + 32 * j;
+ __global float *y = yy + get_group_id(0) * QK_K + 128 * n + 32 * j;
const __global uint8_t *q = x[i].qs + 32 * n;
const __global uint8_t *hm = x[i].hmask;
__kernel void dequantize_block_q4_K(__global const struct block_q4_K *x, __global float *yy)
{
- const int i = get_group_id(0);
+ const int i = get_group_id(0) + get_global_offset(0);
const int tid = get_local_id(0);
const int il = tid / 8;
const int ir = tid % 8;
const int is = 2 * il;
const int n = 4;
- __global float *y = yy + i * QK_K + 64 * il + n * ir;
+ __global float *y = yy + get_group_id(0) * QK_K + 64 * il + n * ir;
const float dall = vload_half(0, &x[i].d);
const float dmin = vload_half(0, &x[i].dmin);
__kernel void dequantize_block_q5_K(__global const struct block_q5_K *x, __global float *yy)
{
- const int i = get_group_id(0);
+ const int i = get_group_id(0) + get_global_offset(0);
const int tid = get_local_id(0);
const int il = tid / 16;
const int ir = tid % 16;
const int is = 2 * il;
- __global float *y = yy + i * QK_K + 64 * il + 2 * ir;
+ __global float *y = yy + get_group_id(0) * QK_K + 64 * il + 2 * ir;
const float dall = vload_half(0, &x[i].d);
const float dmin = vload_half(0, &x[i].dmin);
__kernel void dequantize_block_q6_K(__global const struct block_q6_K *x, __global float *yy)
{
- const int i = get_group_id(0);
+ const int i = get_group_id(0) + get_global_offset(0);
const int tid = get_local_id(0);
const int ip = tid / 32;
const int il = tid - 32 * ip;
const int is = 8 * ip + il / 16;
- __global float *y = yy + i * QK_K + 128 * ip + il;
+ __global float *y = yy + get_group_id(0) * QK_K + 128 * ip + il;
const float d = vload_half(0, &x[i].d);
const int row = get_group_id(0);
const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
+ const int ib0 = row*num_blocks_per_row + get_global_offset(0);
__global const struct block_q2_K * x = xx + ib0;
const int row = get_group_id(0);
const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
+ const int ib0 = row*num_blocks_per_row + get_global_offset(0);
__global const struct block_q3_K * x = xx + ib0;
const int row = get_group_id(0);
const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
+ const int ib0 = row*num_blocks_per_row + get_global_offset(0);
const int tid = get_local_id(0)/K_QUANTS_PER_ITERATION; // 0...15
const int ix = get_local_id(0)%K_QUANTS_PER_ITERATION;
const int row = get_group_id(0);
const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
+ const int ib0 = row*num_blocks_per_row + get_global_offset(0);
const int tid = get_local_id(0)/2; // 0...15
const int ix = get_local_id(0)%2;
const int row = get_group_id(0);
const int num_blocks_per_row = ncols / QK_K;
- const int ib0 = row*num_blocks_per_row;
+ const int ib0 = row*num_blocks_per_row + get_global_offset(0);
__global const struct block_q6_K * x = xx + ib0;
const uint qk = QUANT_K;
const uint qr = QUANT_R;
- const int ib = i/qk; // block index
+ const int ib = i/qk + get_global_offset(0); // block index
const int iqs = (i%qk)/qr; // quant index
const int iybs = i - i%qk; // y block start index
const int y_offset = qr == 1 ? 1 : qk/2;
std::string dequant_mul_mat_vec_template = MULTILINE_QUOTE(
__kernel void KERNEL_NAME(__global X_TYPE* x, __local float* tmp, __global float* y, __global float* dst, const int ncols) {
- const int block_size = get_local_size(0);
+ const int local_size = get_local_size(0);
const int row = get_group_id(0);
const int tid = get_local_id(0);
const uint qk = QUANT_K;
const uint qr = QUANT_R;
+ const int col_step = local_size * 2;
const int y_offset = qr == 1 ? 1 : qk/2;
+ x += get_global_offset(0);
+
tmp[tid] = 0;
- for (int i = 0; i < ncols/block_size; i += 2) {
- const int col = i*block_size + 2*tid;
+ for (int col = tid*2; col < ncols; col += col_step) {
const int ib = (row*ncols + col)/qk; // block index
const int iqs = (col%qk)/qr; // quant index
const int iybs = col - col%qk; // y block start index
// sum up partial sums and write back result
barrier(CLK_LOCAL_MEM_FENCE);
- for (int s=block_size/2; s>0; s>>=1) {
+ for (int s=local_size/2; s>0; s>>=1) {
if (tid < s) {
tmp[tid] += tmp[tid + s];
}
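// (the column-strided loop above replaces the old fixed-trip-count loop, so
// rows whose length is not a multiple of 2*local_size are still fully covered;
// the tree reduction then folds the per-thread partial sums held in tmp[])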
"mul_f32", "float"
};
-std::string& replace(std::string& s, const std::string& from, const std::string& to) {
+static std::string& replace(std::string& s, const std::string& from, const std::string& to) {
size_t pos = 0;
while ((pos = s.find(from, pos)) != std::string::npos) {
s.replace(pos, from.length(), to);
return s;
}
-std::string generate_kernels() {
+static std::string generate_kernels() {
std::stringstream src;
src << program_source << '\n';
src << k_quants_source << '\n';
const enum ggml_type type = src->type;
const size_t ts = ggml_type_size(type);
const size_t bs = ggml_blck_size(type);
+ const uint64_t row_size = ts*ne0/bs;
- const void * x = (const void *) ((const char *) src->data + i2*nb2 + i3*nb3);
- if (nb0 == ts && nb1 == ts*ne0/bs) {
- err = clEnqueueWriteBuffer(queue, dst, CL_FALSE, offset, ne1*nb1, x, 0, NULL, ev);
- return err;
+ const char * x = (const char *) src->data + i2*nb2 + i3*nb3;
+ if (nb0 == ts && nb1 == row_size) {
+ return clEnqueueWriteBuffer(queue, dst, CL_FALSE, offset, ne1*row_size, x, 0, NULL, ev);
}
if (nb0 == ts) {
const size_t buffer_origin[3] = { offset, 0, 0 };
const size_t host_origin[3] = { 0, 0, 0 };
- const size_t region[3] = { ts*ne0/bs, ne1, 1 };
- err = clEnqueueWriteBufferRect(queue, dst, CL_FALSE, buffer_origin, host_origin, region, ts*ne0/bs, 0, nb1, 0, x, 0, NULL, ev);
- return err;
+ const size_t region[3] = { row_size, ne1, 1 };
+ return clEnqueueWriteBufferRect(queue, dst, CL_FALSE, buffer_origin, host_origin, region, row_size, 0, nb1, 0, x, 0, NULL, ev);
}
+ std::vector<cl_event> events;
+ if (ev && ne1>1) events.reserve(ne1-1);
for (uint64_t i1 = 0; i1 < ne1; i1++) {
// pretend the row is a matrix with cols=1
- const size_t buffer_origin[3] = { offset, i1, 0 };
+ const size_t buffer_origin[3] = { offset + i1*row_size, 0, 0 };
const size_t host_origin[3] = { 0, 0, 0 };
- const size_t region[3] = { ts/bs, ne0, 1 };
- err = clEnqueueWriteBufferRect(queue, dst, CL_FALSE, buffer_origin, host_origin, region, 0, 0, nb0, 0, ((const char *)x) + i1*nb0, 0, NULL, ev);
+ const size_t region[3] = { ts, ne0/bs, 1 };
+ // if an event is requested, make the last write wait for all previous writes to complete
+ if (ev && i1) {
+ events.push_back(*ev);
+ }
+ cl_uint nevents = i1 == ne1-1 ? events.size() : 0U;
+ err = clEnqueueWriteBufferRect(queue, dst, CL_FALSE, buffer_origin, host_origin, region, ts, 0, nb0, 0, x + i1*nb1, nevents, nevents ? events.data() : nullptr, ev);
if (err != CL_SUCCESS) {
- break;
+ for (auto event : events) {
+ clReleaseEvent(event);
+ }
+ return err;
}
}
- return err;
+ for (auto event : events) {
+ CL_CHECK(clReleaseEvent(event));
+ }
+ return CL_SUCCESS;
}
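// Caller-side sketch (illustrative): with the event chaining above, waiting on
// the single event returned through `ev` is enough to know that the whole 2D
// slice has landed, even though it was split into ne1 row writes:
//
//     cl_event ev;
//     CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, &ev));
//     CL_CHECK(clWaitForEvents(1, &ev));
//     CL_CHECK(clReleaseEvent(ev));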
static void ggml_cl_mul_f32(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
const int64_t ne01 = src0->ne[1];
const int64_t ne02 = src0->ne[2];
const int64_t ne03 = src0->ne[3];
- const int64_t ne0 = ne00 * ne01 * ne02 * ne03;
const int64_t ne10 = src1->ne[0];
const int64_t ne11 = src1->ne[1];
const int64_t ne12 = src1->ne[2];
const int64_t ne13 = src1->ne[3];
- const int64_t nb10 = src1->nb[0];
const int nb2 = dst->nb[2];
const int nb3 = dst->nb[3];
size_t x_size;
size_t d_size;
- cl_mem d_X = ggml_cl_pool_malloc(ne0 * sizeof(float), &x_size); // src0
+ cl_mem d_X = ggml_cl_pool_malloc(ne00 * ne01 * sizeof(float), &x_size); // src0
cl_mem d_Y = (cl_mem) src1->extra; // src1 is already on device, broadcasted.
- cl_mem d_D = ggml_cl_pool_malloc(ne0 * sizeof(float), &d_size); // dst
+ cl_mem d_D = ggml_cl_pool_malloc(ne00 * ne01 * sizeof(float), &d_size); // dst
for (int64_t i03 = 0; i03 < ne03; i03++) {
for (int64_t i02 = 0; i02 < ne02; i02++) {
- const int i0 = i03*ne02 + i02;
-
cl_event ev;
// copy src0 to device
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, i0, src0, i03, i02, &ev));
-
- if (nb10 == sizeof(float)) {
- // Contiguous, avoid overhead from queueing many kernel runs
- const int64_t i13 = i03%ne13;
- const int64_t i12 = i02%ne12;
- const int i1 = i13*ne12*ne11 + i12*ne11;
-
- cl_int x_offset = 0;
- cl_int y_offset = i1*ne10;
- cl_int d_offset = 0;
-
- size_t global = ne00 * ne01;
- cl_int ky = ne10;
- CL_CHECK(clSetKernelArg(mul_f32_cl, 0, sizeof(cl_mem), &d_X));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 1, sizeof(cl_int), &x_offset));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 2, sizeof(cl_mem), &d_Y));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 3, sizeof(cl_int), &y_offset));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 4, sizeof(cl_mem), &d_D));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 5, sizeof(cl_int), &d_offset));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 6, sizeof(cl_int), &ky));
- CL_CHECK(clEnqueueNDRangeKernel(queue, mul_f32_cl, 1, NULL, &global, NULL, 1, &ev, NULL));
- } else {
- for (int64_t i01 = 0; i01 < ne01; i01++) {
- const int64_t i13 = i03%ne13;
- const int64_t i12 = i02%ne12;
- const int64_t i11 = i01%ne11;
- const int i1 = i13*ne12*ne11 + i12*ne11 + i11;
-
- cl_int x_offset = i01*ne00;
- cl_int y_offset = i1*ne10;
- cl_int d_offset = i01*ne00;
+ CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, &ev));
- // compute
- size_t global = ne00;
- cl_int ky = ne10;
- CL_CHECK(clSetKernelArg(mul_f32_cl, 0, sizeof(cl_mem), &d_X));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 1, sizeof(cl_int), &x_offset));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 2, sizeof(cl_mem), &d_Y));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 3, sizeof(cl_int), &y_offset));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 4, sizeof(cl_mem), &d_D));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 5, sizeof(cl_int), &d_offset));
- CL_CHECK(clSetKernelArg(mul_f32_cl, 6, sizeof(cl_int), &ky));
- CL_CHECK(clEnqueueNDRangeKernel(queue, mul_f32_cl, 1, NULL, &global, NULL, 1, &ev, NULL));
- }
- }
+ const int64_t i13 = i03%ne13;
+ const int64_t i12 = i02%ne12;
+ const int i1 = i13*ne12*ne11 + i12*ne11;
+
+ cl_int x_offset = 0;
+ cl_int y_offset = i1*ne10;
+ cl_int d_offset = 0;
+
+ size_t global = ne00 * ne01;
+ cl_int ky = ne10 * ne11;
+
+ CL_CHECK(clSetKernelArg(mul_f32_cl, 0, sizeof(cl_mem), &d_X));
+ CL_CHECK(clSetKernelArg(mul_f32_cl, 1, sizeof(cl_int), &x_offset));
+ CL_CHECK(clSetKernelArg(mul_f32_cl, 2, sizeof(cl_mem), &d_Y));
+ CL_CHECK(clSetKernelArg(mul_f32_cl, 3, sizeof(cl_int), &y_offset));
+ CL_CHECK(clSetKernelArg(mul_f32_cl, 4, sizeof(cl_mem), &d_D));
+ CL_CHECK(clSetKernelArg(mul_f32_cl, 5, sizeof(cl_int), &d_offset));
+ CL_CHECK(clSetKernelArg(mul_f32_cl, 6, sizeof(cl_int), &ky));
+ CL_CHECK(clEnqueueNDRangeKernel(queue, mul_f32_cl, 1, NULL, &global, NULL, 1, &ev, NULL));
CL_CHECK(clReleaseEvent(ev));
CL_CHECK(clFinish(queue));
const int64_t ne10 = src1->ne[0];
const int64_t ne11 = src1->ne[1];
+ const int64_t ne12 = src1->ne[2];
+ const int64_t ne13 = src1->ne[3];
const int nb2 = dst->nb[2];
const int nb3 = dst->nb[3];
+ const int64_t r2 = ne12 / ne02;
+ const int64_t r3 = ne13 / ne03;
+
const float alpha = 1.0f;
const float beta = 0.0f;
const int x_ne = ne01 * ne00;
cl_mem d_Y = ggml_cl_pool_malloc(sizeof(float) * y_ne, &y_size);
cl_mem d_D = ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size);
+ size_t x_offset = 0;
+
for (int64_t i03 = 0; i03 < ne03; i03++) {
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- // copy data to device
- if (src0->backend != GGML_BACKEND_GPU) {
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
- }
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i03, i02, NULL));
+ // TODO: copy src0 here when r3>1
+ for (int64_t i13 = i03 * r3, e13 = i13 + r3; i13 < e13; i13++) {
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ if (src0->backend == GGML_BACKEND_GPU) {
+ x_offset = (i03 * ne02 + i02) * x_ne;
+ } else {
+ // copy src0 to device
+ CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
+ }
- CL_CHECK(clFinish(queue));
+ for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
+ // copy src1 to device
+ CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL));
- // compute
- cl_event ev_sgemm;
- clblast::StatusCode status = clblast::Gemm<cl_float>(clblast::Layout::kColMajor,
- clblast::Transpose::kYes, clblast::Transpose::kNo,
- ne01, ne11, ne10,
- alpha,
- d_X, 0, ne00,
- d_Y, 0, ne10,
- beta,
- d_D, 0, ne01,
- &queue, &ev_sgemm);
-
- if (status != clblast::StatusCode::kSuccess) {
- GGML_ASSERT(false);
- }
+ CL_CHECK(clFinish(queue));
- // copy dst to host
- float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
- CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL));
+ // compute
+ cl_event ev_sgemm;
+ clblast::StatusCode status = clblast::Gemm<cl_float>(clblast::Layout::kColMajor,
+ clblast::Transpose::kYes, clblast::Transpose::kNo,
+ ne01, ne11, ne10,
+ alpha,
+ d_X, x_offset, ne00,
+ d_Y, 0, ne10,
+ beta,
+ d_D, 0, ne01,
+ &queue, &ev_sgemm);
+
+ if (status != clblast::StatusCode::kSuccess) {
+ GGML_ASSERT(false);
+ }
+
+ // copy dst to host
+ float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
+ CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL));
+ }
+ }
}
}
ggml_cl_pool_free(d_D, d_size);
}
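// Broadcast bookkeeping used above (sketch): with r2 = ne12/ne02 and
// r3 = ne13/ne03, each src0 slice (i02, i03) is reused for the r2*r3 src1
// slices that broadcast onto it:
//
//     for i03 in [0, ne03):   for i13 in [i03*r3, i03*r3 + r3):
//         for i02 in [0, ne02):   for i12 in [i02*r2, i02*r2 + r2):
//             dst[i13][i12] = gemm(src0[i03][i02]^T, src1[i13][i12])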
-static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, void * wdata, size_t /* wsize */) {
+static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, void * wdata, size_t wsize) {
GGML_ASSERT(fp16_support);
const int64_t ne00 = src0->ne[0];
const int64_t ne10 = src1->ne[0];
const int64_t ne11 = src1->ne[1];
+ const int64_t ne12 = src1->ne[2];
+ const int64_t ne13 = src1->ne[3];
const int nb10 = src1->nb[0];
const int nb11 = src1->nb[1];
const int nb2 = dst->nb[2];
const int nb3 = dst->nb[3];
+ const int64_t r2 = ne12 / ne02;
+ const int64_t r3 = ne13 / ne03;
+
const ggml_fp16_t alpha = ggml_fp32_to_fp16(1.0f);
const ggml_fp16_t beta = ggml_fp32_to_fp16(0.0f);
const int x_ne = ne01 * ne00;
const int y_ne = ne11 * ne10;
const int d_ne = ne11 * ne01;
+ GGML_ASSERT(wsize >= sizeof(ggml_fp16_t) * y_ne);
+ GGML_ASSERT(wsize >= sizeof(ggml_fp16_t) * d_ne);
+ ggml_fp16_t * const tmp = (ggml_fp16_t *) wdata;
+
size_t x_size;
size_t y_size;
size_t d_size;
bool src1_cont_rows = nb10 == sizeof(float);
bool src1_cont_cols = (size_t)nb11 == ne11*sizeof(float);
- for (int64_t i03 = 0; i03 < ne03; i03++) {
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- // copy src0 to device
- if (src0->backend != GGML_BACKEND_GPU) {
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
- }
+ size_t x_offset = 0;
- // convert src1 to fp16
- // TODO: use multiple threads
- ggml_fp16_t * const tmp = (ggml_fp16_t *) wdata + (ne11 * ne10) * (i03 * ne02 + i02);
- char * src1i = (char *) src1->data + i03*nb13 + i02*nb12;
- if (src1_cont_rows) {
- if (src1_cont_cols) {
- ggml_fp32_to_fp16_row((float *) src1i, tmp, ne10*ne11);
+ for (int64_t i03 = 0; i03 < ne03; i03++) {
+ // TODO: copy src0 here when r3>1
+ for (int64_t i13 = i03 * r3, e13 = i13 + r3; i13 < e13; i13++) {
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ if (src0->backend == GGML_BACKEND_GPU) {
+ x_offset = (i03 * ne02 + i02) * x_ne;
+ } else {
+ // copy src0 to device
+ CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
}
- else {
- for (int64_t i01 = 0; i01 < ne11; i01++) {
- ggml_fp32_to_fp16_row((float *) (src1i + i01*nb11), tmp + i01*ne10, ne10);
+
+ for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
+ // convert src1 to fp16
+ // TODO: use multiple threads
+ char * src1i = (char *) src1->data + i13*nb13 + i12*nb12;
+ if (src1_cont_rows) {
+ if (src1_cont_cols) {
+ ggml_fp32_to_fp16_row((float *) src1i, tmp, ne10*ne11);
+ }
+ else {
+ for (int64_t i11 = 0; i11 < ne11; i11++) {
+ ggml_fp32_to_fp16_row((float *) (src1i + i11*nb11), tmp + i11*ne10, ne10);
+ }
+ }
}
- }
- }
- else {
- for (int64_t i01 = 0; i01 < ne11; i01++) {
- for (int64_t i00 = 0; i00 < ne10; i00++) {
- // very slow due to no inlining
- tmp[i01*ne10 + i00] = ggml_fp32_to_fp16(*(float *) (src1i + i01*nb11 + i00*nb10));
+ else {
+ for (int64_t i11 = 0; i11 < ne11; i11++) {
+ for (int64_t i10 = 0; i10 < ne10; i10++) {
+ // very slow due to no inlining
+ tmp[i11*ne10 + i10] = ggml_fp32_to_fp16(*(float *) (src1i + i11*nb11 + i10*nb10));
+ }
+ }
}
- }
- }
- // copy src1 to device
- CL_CHECK(clEnqueueWriteBuffer(queue, d_Y, false, 0, sizeof(ggml_fp16_t) * y_ne, tmp, 0, NULL, NULL));
+ // copy src1 to device
+ CL_CHECK(clEnqueueWriteBuffer(queue, d_Y, false, 0, sizeof(ggml_fp16_t) * y_ne, tmp, 0, NULL, NULL));
- CL_CHECK(clFinish(queue));
+ CL_CHECK(clFinish(queue));
- // compute
- cl_event ev_sgemm;
- clblast::StatusCode status = clblast::Gemm<cl_half>(clblast::Layout::kColMajor,
- clblast::Transpose::kYes, clblast::Transpose::kNo,
- ne01, ne11, ne10,
- alpha,
- d_X, 0, ne00,
- d_Y, 0, ne10,
- beta,
- d_D, 0, ne01,
- &queue, &ev_sgemm);
-
- if (status != clblast::StatusCode::kSuccess) {
- GGML_ASSERT(false);
- }
+ // compute
+ cl_event ev_sgemm;
+ clblast::StatusCode status = clblast::Gemm<cl_half>(clblast::Layout::kColMajor,
+ clblast::Transpose::kYes, clblast::Transpose::kNo,
+ ne01, ne11, ne10,
+ alpha,
+ d_X, x_offset, ne00,
+ d_Y, 0, ne10,
+ beta,
+ d_D, 0, ne01,
+ &queue, &ev_sgemm);
+
+ if (status != clblast::StatusCode::kSuccess) {
+ GGML_ASSERT(false);
+ }
- // copy dst to host, then convert to float
- CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL));
+ // copy dst to host, then convert to float
+ CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(ggml_fp16_t) * d_ne, tmp, 1, &ev_sgemm, NULL));
- float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
+ float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
- ggml_fp16_to_fp32_row(tmp, d, d_ne);
+ ggml_fp16_to_fp32_row(tmp, d, d_ne);
+ }
+ }
}
}
const int64_t ne10 = src1->ne[0];
const int64_t ne11 = src1->ne[1];
+ const int64_t ne12 = src1->ne[2];
+ const int64_t ne13 = src1->ne[3];
const int nb2 = dst->nb[2];
const int nb3 = dst->nb[3];
const ggml_type type = src0->type;
- const bool mul_mat_vec = ne11 == 1;
+ const bool mul_mat_vec = ne11 == 1 && ne00%2 == 0;
+
+ const int64_t r2 = ne12 / ne02;
+ const int64_t r3 = ne13 / ne03;
const float alpha = 1.0f;
const float beta = 0.0f;
const int x_ne = ne01 * ne00;
const int y_ne = ne11 * ne10;
const int d_ne = ne11 * ne01;
- const size_t q_sz = ggml_type_size(type) * x_ne / ggml_blck_size(type);
+ const int x_bps = x_ne / ggml_blck_size(type); // blocks per 2D slice
+ const size_t q_sz = ggml_type_size(type) * x_bps;
size_t x_size;
size_t y_size;
GGML_ASSERT(to_fp32_cl != nullptr);
const size_t global_denom = ggml_cl_global_denom(type);
- const size_t local = ggml_cl_local_size(type);
+ const size_t local = mul_mat_vec ? CL_DMMV_LOCAL_SIZE : ggml_cl_local_size(type);
size_t ev_idx = 0;
std::vector<cl_event> events;
for (int64_t i03 = 0; i03 < ne03; i03++) {
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- // copy src0 to device if necessary
- if (src0->backend == GGML_BACKEND_CPU) {
- events.emplace_back();
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, events.data() + ev_idx++));
- } else if (src0->backend == GGML_BACKEND_GPU) {
- d_Q = (cl_mem) src0->extra;
- } else {
- GGML_ASSERT(false);
- }
- if (mul_mat_vec) { // specialized dequantize_mul_mat_vec kernel
- // copy src1 to device
- events.emplace_back();
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i03, i02, events.data() + ev_idx++));
-
- // compute
- const size_t global = ne01 * CL_DMMV_BLOCK_SIZE;
- const size_t local = CL_DMMV_BLOCK_SIZE;
- const cl_int ncols = ne00;
- events.emplace_back();
- CL_CHECK(clSetKernelArg(*dmmv, 0, sizeof(cl_mem), &d_Q));
- CL_CHECK(clSetKernelArg(*dmmv, 1, sizeof(float) * local, NULL));
- CL_CHECK(clSetKernelArg(*dmmv, 2, sizeof(cl_mem), &d_Y));
- CL_CHECK(clSetKernelArg(*dmmv, 3, sizeof(cl_mem), &d_D));
- CL_CHECK(clSetKernelArg(*dmmv, 4, sizeof(cl_int), &ncols));
- CL_CHECK(clEnqueueNDRangeKernel(queue, *dmmv, 1, NULL, &global, &local, events.size() - 1, events.data(), events.data() + ev_idx++));
- } else { // general dequantization kernel + CLBlast matrix matrix multiplication
- // convert src0 to fp32 on device
- const size_t global = x_ne / global_denom;
- CL_CHECK(clSetKernelArg(*to_fp32_cl, 0, sizeof(cl_mem), &d_Q));
- CL_CHECK(clSetKernelArg(*to_fp32_cl, 1, sizeof(cl_mem), &d_X));
- CL_CHECK(clEnqueueNDRangeKernel(queue, *to_fp32_cl, 1, NULL, &global, local > 0 ? &local : NULL, events.size(), !events.empty() ? events.data() : NULL, NULL));
-
- // copy src1 to device
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i03, i02, NULL));
-
- events.emplace_back();
-
- // wait for conversion
- CL_CHECK(clFinish(queue));
-
- // compute
- clblast::StatusCode status = clblast::Gemm<cl_float>(clblast::Layout::kColMajor,
- clblast::Transpose::kYes, clblast::Transpose::kNo,
- ne01, ne11, ne10,
- alpha,
- d_X, 0, ne00,
- d_Y, 0, ne10,
- beta,
- d_D, 0, ne01,
- &queue, events.data() + ev_idx++);
-
- if (status != clblast::StatusCode::kSuccess) {
+ // TODO: copy and dequantize src0 here when r3>1
+ for (int64_t i13 = i03 * r3, e13 = i13 + r3; i13 < e13; i13++) {
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ // copy src0 to device if necessary
+ if (src0->backend == GGML_BACKEND_CPU) {
+ events.emplace_back();
+ CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, events.data() + ev_idx++));
+ } else if (src0->backend == GGML_BACKEND_GPU) {
+ d_Q = (cl_mem) src0->extra;
+ } else {
GGML_ASSERT(false);
}
- }
- // copy dst to host
- float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
- CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &events[events.size() - 1], NULL));
- for (auto *event : events) {
- clReleaseEvent(event);
- }
+ if (!mul_mat_vec) {
+ // convert src0 to fp32 on device
+ const size_t global = x_ne / global_denom;
+ const size_t offset = src0->backend == GGML_BACKEND_GPU ? (i03 * ne02 + i02) * x_bps : 0;
+ CL_CHECK(clSetKernelArg(*to_fp32_cl, 0, sizeof(cl_mem), &d_Q));
+ CL_CHECK(clSetKernelArg(*to_fp32_cl, 1, sizeof(cl_mem), &d_X));
+ CL_CHECK(clEnqueueNDRangeKernel(queue, *to_fp32_cl, 1, &offset, &global, local > 0 ? &local : NULL, events.size(), !events.empty() ? events.data() : NULL, NULL));
+ }
+
+ for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
+ if (mul_mat_vec) { // specialized dequantize_mul_mat_vec kernel
+ // copy src1 to device
+ events.emplace_back();
+ CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, events.data() + ev_idx++));
+
+ // compute
+ const size_t global = ne01 * local;
+ const size_t offset = src0->backend == GGML_BACKEND_GPU ? (i03 * ne02 + i02) * x_bps : 0;
+ const cl_int ncols = ne00;
+ events.emplace_back();
+ CL_CHECK(clSetKernelArg(*dmmv, 0, sizeof(cl_mem), &d_Q));
+ CL_CHECK(clSetKernelArg(*dmmv, 1, sizeof(float) * local, NULL));
+ CL_CHECK(clSetKernelArg(*dmmv, 2, sizeof(cl_mem), &d_Y));
+ CL_CHECK(clSetKernelArg(*dmmv, 3, sizeof(cl_mem), &d_D));
+ CL_CHECK(clSetKernelArg(*dmmv, 4, sizeof(cl_int), &ncols));
+ CL_CHECK(clEnqueueNDRangeKernel(queue, *dmmv, 1, &offset, &global, &local, events.size() - 1, events.data(), events.data() + ev_idx++));
+ } else { // CLBlast matrix matrix multiplication
+ // copy src1 to device
+ CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL));
+
+ // wait for conversion
+ CL_CHECK(clFinish(queue));
+
+ // compute
+ events.emplace_back();
+ clblast::StatusCode status = clblast::Gemm<cl_float>(clblast::Layout::kColMajor,
+ clblast::Transpose::kYes, clblast::Transpose::kNo,
+ ne01, ne11, ne10,
+ alpha,
+ d_X, 0, ne00,
+ d_Y, 0, ne10,
+ beta,
+ d_D, 0, ne01,
+ &queue, events.data() + ev_idx++);
+
+ if (status != clblast::StatusCode::kSuccess) {
+ GGML_ASSERT(false);
+ }
+ }
- ev_idx = 0;
- events.clear();
+ // copy dst to host
+ float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
+ CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &events[events.size() - 1], NULL));
+ for (auto *event : events) {
+ clReleaseEvent(event);
+ }
+
+ ev_idx = 0;
+ events.clear();
+ }
+ }
}
}
return false;
}
-bool ggml_cl_mul_mat_use_f16(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * /* dst */) {
+static bool ggml_cl_mul_mat_use_f16(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * /* dst */) {
// If device doesn't support FP16
if (!fp16_support) {
return false;
}
size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
- if (ggml_cl_mul_mat_use_f16(src0, src1, dst)) {
- return ggml_nelements(src1) * sizeof(ggml_fp16_t);
+ if (src0->type == GGML_TYPE_F16 && ggml_cl_mul_mat_use_f16(src0, src1, dst)) {
+ return sizeof(ggml_fp16_t) * std::max(src1->ne[0] * src1->ne[1], dst->ne[0] * dst->ne[1]);
}
return 0;
}
const int64_t ne3 = tensor->ne[3];
const ggml_type type = tensor->type;
- const size_t q_sz = ggml_type_size(type) * ne0 * ne1 * ne2 * ne3 / ggml_blck_size(type);
+ const size_t s_sz = ggml_type_size(type) * (size_t) (ne0 * ne1 / ggml_blck_size(type));
+ const size_t q_sz = s_sz * (size_t) (ne2 * ne3);
size_t q_size;
cl_mem dst = ggml_cl_pool_malloc(q_sz, &q_size);
tensor->data = data;
// copy tensor to device
+ size_t offset = 0;
for (int64_t i3 = 0; i3 < ne3; i3++) {
for (int64_t i2 = 0; i2 < ne2; i2++) {
- int i = i3*ne2 + i2;
- CL_CHECK(ggml_cl_h2d_tensor_2d(queue, dst, i*ne0*ne1, tensor, i3, i2, NULL));
+ CL_CHECK(ggml_cl_h2d_tensor_2d(queue, dst, offset, tensor, i3, i2, NULL));
+ offset += s_sz;
}
}
--- /dev/null
+#include "ggml-quants.h"
+#include "ggml-impl.h"
+
+#include <math.h>
+#include <string.h>
+#include <assert.h>
+#include <float.h>
+
+#ifdef __ARM_NEON
+
+// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
+//
+// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
+//
+#include <arm_neon.h>
+
+#else
+
+#ifdef __wasm_simd128__
+#include <wasm_simd128.h>
+#else
+#ifdef __POWER9_VECTOR__
+#include <altivec.h>
+#undef bool
+#define bool _Bool
+#else
+#if defined(_MSC_VER) || defined(__MINGW32__)
+#include <intrin.h>
+#else
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
+#if !defined(__riscv)
+#include <immintrin.h>
+#endif
+#endif
+#endif
+#endif
+#endif
+#endif
+
+#ifdef __riscv_v_intrinsic
+#include <riscv_vector.h>
+#endif
+
+#undef MIN
+#undef MAX
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
+
+#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
+// multiply int8_t, add results pairwise twice
+static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
+ // Get absolute values of x vectors
+ const __m128i ax = _mm_sign_epi8(x, x);
+ // Sign the values of the y vectors
+ const __m128i sy = _mm_sign_epi8(y, x);
+ // Perform multiplication and create 16-bit values
+ const __m128i dot = _mm_maddubs_epi16(ax, sy);
+ const __m128i ones = _mm_set1_epi16(1);
+ return _mm_madd_epi16(ones, dot);
+}
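// Scalar reference (sketch, relying on the <stdint.h> pulled in via ggml-impl.h):
// the sign trick works because x*y == |x| * (sign(x)*y), which lets the
// unsigned-by-signed maddubs multiply signed int8 safely.
static void ref_mul_sum_i8_pairs(const int8_t x[16], const int8_t y[16], int32_t out[4]) {
    for (int g = 0; g < 4; ++g) {         // one int32 lane per 4 adjacent products
        int32_t s = 0;
        for (int k = 0; k < 4; ++k) {
            s += (int32_t) x[4*g + k] * (int32_t) y[4*g + k];
        }
        out[g] = s;
    }
}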
+
+#if __AVX__ || __AVX2__ || __AVX512F__
+// horizontally add 8 floats
+static inline float hsum_float_8(const __m256 x) {
+ __m128 res = _mm256_extractf128_ps(x, 1);
+ res = _mm_add_ps(res, _mm256_castps256_ps128(x));
+ res = _mm_add_ps(res, _mm_movehl_ps(res, res));
+ res = _mm_add_ss(res, _mm_movehdup_ps(res));
+ return _mm_cvtss_f32(res);
+}
+
+// horizontally add 8 int32_t
+static inline int hsum_i32_8(const __m256i a) {
+ const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
+ const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
+ const __m128i sum64 = _mm_add_epi32(hi64, sum128);
+ const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
+ return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
+}
+
+// horizontally add 4 int32_t
+static inline int hsum_i32_4(const __m128i a) {
+ const __m128i hi64 = _mm_unpackhi_epi64(a, a);
+ const __m128i sum64 = _mm_add_epi32(hi64, a);
+ const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
+ return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
+}
+
+#if defined(__AVX2__) || defined(__AVX512F__)
+// spread 32 bits to 32 bytes { 0x00, 0xFF }
+static inline __m256i bytes_from_bits_32(const uint8_t * x) {
+ uint32_t x32;
+ memcpy(&x32, x, sizeof(uint32_t));
+ const __m256i shuf_mask = _mm256_set_epi64x(
+ 0x0303030303030303, 0x0202020202020202,
+ 0x0101010101010101, 0x0000000000000000);
+ __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
+ const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
+ bytes = _mm256_or_si256(bytes, bit_mask);
+ return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
+}
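// Scalar reference (sketch): each of the 32 input bits becomes one 0x00/0xFF
// byte; the shuffle + or + cmpeq sequence above computes the same thing
// branch-free.
static void ref_bytes_from_bits_32(const uint8_t * x, uint8_t out[32]) {
    uint32_t x32;
    memcpy(&x32, x, sizeof(uint32_t));    // <string.h> is included at the top of this file
    for (int k = 0; k < 32; ++k) {
        out[k] = (x32 >> k) & 1 ? 0xFF : 0x00;
    }
}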
+
+// Unpack 32 4-bit fields into 32 bytes
+// The output vector contains 32 bytes, each one in the [0..15] interval
+static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
+{
+ const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
+ const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
+ const __m256i lowMask = _mm256_set1_epi8( 0xF );
+ return _mm256_and_si256(lowMask, bytes);
+}
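// Scalar reference (sketch): the low nibbles of the 16 source bytes land in
// out[0..15] and the high nibbles in out[16..31].
static void ref_bytes_from_nibbles_32(const uint8_t * rsi, uint8_t out[32]) {
    for (int i = 0; i < 16; ++i) {
        out[i]      = rsi[i] & 0x0F;
        out[i + 16] = rsi[i] >> 4;
    }
}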
+
+// add int16_t pairwise and return as float vector
+static inline __m256 sum_i16_pairs_float(const __m256i x) {
+ const __m256i ones = _mm256_set1_epi16(1);
+ const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
+ return _mm256_cvtepi32_ps(summed_pairs);
+}
+
+static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
+#if __AVXVNNI__
+ const __m256i zero = _mm256_setzero_si256();
+ const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
+ return _mm256_cvtepi32_ps(summed_pairs);
+#else
+ // Perform multiplication and create 16-bit values
+ const __m256i dot = _mm256_maddubs_epi16(ax, sy);
+ return sum_i16_pairs_float(dot);
+#endif
+}
+
+// multiply int8_t, add results pairwise twice and return as float vector
+static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
+#if __AVXVNNIINT8__
+ const __m256i zero = _mm256_setzero_si256();
+ const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
+ return _mm256_cvtepi32_ps(summed_pairs);
+#else
+ // Get absolute values of x vectors
+ const __m256i ax = _mm256_sign_epi8(x, x);
+ // Sign the values of the y vectors
+ const __m256i sy = _mm256_sign_epi8(y, x);
+ return mul_sum_us8_pairs_float(ax, sy);
+#endif
+}
+
+static inline __m128i packNibbles( __m256i bytes )
+{
+ // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
+#if __AVX512F__
+ const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000
+ bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh
+ return _mm256_cvtepi16_epi8(bytes); // abcd_efgh
+#else
+ const __m256i lowByte = _mm256_set1_epi16( 0xFF );
+ __m256i high = _mm256_andnot_si256( lowByte, bytes );
+ __m256i low = _mm256_and_si256( lowByte, bytes );
+ high = _mm256_srli_epi16( high, 4 );
+ bytes = _mm256_or_si256( low, high );
+
+ // Compress uint16_t lanes into bytes
+ __m128i r0 = _mm256_castsi256_si128( bytes );
+ __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
+ return _mm_packus_epi16( r0, r1 );
+#endif
+}
+#elif defined(__AVX__)
+// spread 32 bits to 32 bytes { 0x00, 0xFF }
+static inline __m256i bytes_from_bits_32(const uint8_t * x) {
+ uint32_t x32;
+ memcpy(&x32, x, sizeof(uint32_t));
+ const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
+ const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
+ __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
+ __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
+ const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
+ bytesl = _mm_or_si128(bytesl, bit_mask);
+ bytesh = _mm_or_si128(bytesh, bit_mask);
+ bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
+ bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
+ return MM256_SET_M128I(bytesh, bytesl);
+}
+
+// Unpack 32 4-bit fields into 32 bytes
+// The output vector contains 32 bytes, each one in the [0..15] interval
+static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
+{
+ // Load 16 bytes from memory
+ __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
+ __m128i tmph = _mm_srli_epi16(tmpl, 4);
+ const __m128i lowMask = _mm_set1_epi8(0xF);
+ tmpl = _mm_and_si128(lowMask, tmpl);
+ tmph = _mm_and_si128(lowMask, tmph);
+ return MM256_SET_M128I(tmph, tmpl);
+}
+
+// add int16_t pairwise and return as float vector
+static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
+ const __m128i ones = _mm_set1_epi16(1);
+ const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
+ const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
+ const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
+ return _mm256_cvtepi32_ps(summed_pairs);
+}
+
+static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
+ const __m128i axl = _mm256_castsi256_si128(ax);
+ const __m128i axh = _mm256_extractf128_si256(ax, 1);
+ const __m128i syl = _mm256_castsi256_si128(sy);
+ const __m128i syh = _mm256_extractf128_si256(sy, 1);
+ // Perform multiplication and create 16-bit values
+ const __m128i dotl = _mm_maddubs_epi16(axl, syl);
+ const __m128i doth = _mm_maddubs_epi16(axh, syh);
+ return sum_i16_pairs_float(doth, dotl);
+}
+
+// multiply int8_t, add results pairwise twice and return as float vector
+static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
+ const __m128i xl = _mm256_castsi256_si128(x);
+ const __m128i xh = _mm256_extractf128_si256(x, 1);
+ const __m128i yl = _mm256_castsi256_si128(y);
+ const __m128i yh = _mm256_extractf128_si256(y, 1);
+ // Get absolute values of x vectors
+ const __m128i axl = _mm_sign_epi8(xl, xl);
+ const __m128i axh = _mm_sign_epi8(xh, xh);
+ // Sign the values of the y vectors
+ const __m128i syl = _mm_sign_epi8(yl, xl);
+ const __m128i syh = _mm_sign_epi8(yh, xh);
+ // Perform multiplication and create 16-bit values
+ const __m128i dotl = _mm_maddubs_epi16(axl, syl);
+ const __m128i doth = _mm_maddubs_epi16(axh, syh);
+ return sum_i16_pairs_float(doth, dotl);
+}
+
+static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
+{
+ // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
+ const __m128i lowByte = _mm_set1_epi16( 0xFF );
+ __m128i high = _mm_andnot_si128( lowByte, bytes1 );
+ __m128i low = _mm_and_si128( lowByte, bytes1 );
+ high = _mm_srli_epi16( high, 4 );
+ bytes1 = _mm_or_si128( low, high );
+ high = _mm_andnot_si128( lowByte, bytes2 );
+ low = _mm_and_si128( lowByte, bytes2 );
+ high = _mm_srli_epi16( high, 4 );
+ bytes2 = _mm_or_si128( low, high );
+
+ return _mm_packus_epi16( bytes1, bytes2);
+}
+#endif
+#elif defined(__SSSE3__)
+// horizontally add 4x4 floats
+static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
+ __m128 res_0 =_mm_hadd_ps(a, b);
+ __m128 res_1 =_mm_hadd_ps(c, d);
+ __m128 res =_mm_hadd_ps(res_0, res_1);
+ res =_mm_hadd_ps(res, res);
+ res =_mm_hadd_ps(res, res);
+
+ return _mm_cvtss_f32(res);
+}
+#endif // __AVX__ || __AVX2__ || __AVX512F__
+#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
+
+#if defined(__ARM_NEON)
+#if !defined(__aarch64__)
+
+// compatibility shims for 32-bit ARM: the intrinsics below are AArch64-only
+
+// vaddvq_s16
+// vpaddq_s16
+// vaddvq_s32
+// vaddvq_f32
+// vmaxvq_f32
+// vcvtnq_s32_f32
+
+inline static int32_t vaddvq_s16(int16x8_t v) {
+ return
+ (int32_t)vgetq_lane_s16(v, 0) + (int32_t)vgetq_lane_s16(v, 1) +
+ (int32_t)vgetq_lane_s16(v, 2) + (int32_t)vgetq_lane_s16(v, 3) +
+ (int32_t)vgetq_lane_s16(v, 4) + (int32_t)vgetq_lane_s16(v, 5) +
+ (int32_t)vgetq_lane_s16(v, 6) + (int32_t)vgetq_lane_s16(v, 7);
+}
+
+inline static int16x8_t vpaddq_s16(int16x8_t a, int16x8_t b) {
+ int16x4_t a0 = vpadd_s16(vget_low_s16(a), vget_high_s16(a));
+ int16x4_t b0 = vpadd_s16(vget_low_s16(b), vget_high_s16(b));
+ return vcombine_s16(a0, b0);
+}
+
+inline static int32_t vaddvq_s32(int32x4_t v) {
+ return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
+}
+
+inline static float vaddvq_f32(float32x4_t v) {
+ return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
+}
+
+inline static float vmaxvq_f32(float32x4_t v) {
+ return
+ MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
+ MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
+}
+
+inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
+ int32x4_t res;
+
+ res[0] = roundf(vgetq_lane_f32(v, 0));
+ res[1] = roundf(vgetq_lane_f32(v, 1));
+ res[2] = roundf(vgetq_lane_f32(v, 2));
+ res[3] = roundf(vgetq_lane_f32(v, 3));
+
+ return res;
+}
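+// note: roundf rounds ties away from zero, while the AArch64 vcvtnq instruction
+// rounds ties to even, so results can differ on exact half-way inputs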
+
+// vld1q_s16_x2
+// vld1q_u8_x2
+// vld1q_u8_x4
+// vld1q_s8_x2
+// vld1q_s8_x4
+// TODO: double-check these work correctly
+
+typedef struct ggml_int16x8x2_t {
+ int16x8_t val[2];
+} ggml_int16x8x2_t;
+
+inline static ggml_int16x8x2_t ggml_vld1q_s16_x2(const int16_t * ptr) {
+ ggml_int16x8x2_t res;
+
+ res.val[0] = vld1q_s16(ptr + 0);
+ res.val[1] = vld1q_s16(ptr + 8);
+
+ return res;
+}
+
+typedef struct ggml_uint8x16x2_t {
+ uint8x16_t val[2];
+} ggml_uint8x16x2_t;
+
+inline static ggml_uint8x16x2_t ggml_vld1q_u8_x2(const uint8_t * ptr) {
+ ggml_uint8x16x2_t res;
+
+ res.val[0] = vld1q_u8(ptr + 0);
+ res.val[1] = vld1q_u8(ptr + 16);
+
+ return res;
+}
+
+typedef struct ggml_uint8x16x4_t {
+ uint8x16_t val[4];
+} ggml_uint8x16x4_t;
+
+inline static ggml_uint8x16x4_t ggml_vld1q_u8_x4(const uint8_t * ptr) {
+ ggml_uint8x16x4_t res;
+
+ res.val[0] = vld1q_u8(ptr + 0);
+ res.val[1] = vld1q_u8(ptr + 16);
+ res.val[2] = vld1q_u8(ptr + 32);
+ res.val[3] = vld1q_u8(ptr + 48);
+
+ return res;
+}
+
+typedef struct ggml_int8x16x2_t {
+ int8x16_t val[2];
+} ggml_int8x16x2_t;
+
+inline static ggml_int8x16x2_t ggml_vld1q_s8_x2(const int8_t * ptr) {
+ ggml_int8x16x2_t res;
+
+ res.val[0] = vld1q_s8(ptr + 0);
+ res.val[1] = vld1q_s8(ptr + 16);
+
+ return res;
+}
+
+typedef struct ggml_int8x16x4_t {
+ int8x16_t val[4];
+} ggml_int8x16x4_t;
+
+inline static ggml_int8x16x4_t ggml_vld1q_s8_x4(const int8_t * ptr) {
+ ggml_int8x16x4_t res;
+
+ res.val[0] = vld1q_s8(ptr + 0);
+ res.val[1] = vld1q_s8(ptr + 16);
+ res.val[2] = vld1q_s8(ptr + 32);
+ res.val[3] = vld1q_s8(ptr + 48);
+
+ return res;
+}
+
+#else
+
+#define ggml_int16x8x2_t int16x8x2_t
+#define ggml_uint8x16x2_t uint8x16x2_t
+#define ggml_uint8x16x4_t uint8x16x4_t
+#define ggml_int8x16x2_t int8x16x2_t
+#define ggml_int8x16x4_t int8x16x4_t
+
+#define ggml_vld1q_s16_x2 vld1q_s16_x2
+#define ggml_vld1q_u8_x2 vld1q_u8_x2
+#define ggml_vld1q_u8_x4 vld1q_u8_x4
+#define ggml_vld1q_s8_x2 vld1q_s8_x2
+#define ggml_vld1q_s8_x4 vld1q_s8_x4
+
+#endif
+#endif
+
+#if defined(__ARM_NEON) || defined(__wasm_simd128__)
+#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s
+#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
+#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
+#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
+#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
+#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
+#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
+#define B8(c,s ) B7(c,s, c), B7(c,s, s)
+
+// precomputed tables for expanding 8 bits to 8 bytes:
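+// byte k of table_b2b_0[b] is 0x10 when bit k of b is set and 0x00 otherwise
+// (table_b2b_1 is the complement), so the q5 dot products can expand the
+// packed fifth bits into per-byte 0x10 offsets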
+static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
+static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
+#endif
+
+// reference implementation for deterministic creation of model files
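+// q4_0: blocks of QK4_0 = 32 values stored as an fp16 scale d plus 32 4-bit
+// quants packed two per byte. d = max/-8 maps the largest-magnitude value to
+// code 0, so x ~= d * (q - 8) with q in [0, 15]. Example: if the extreme value
+// is x = -1.0, then d = 0.125, x gets code q = 0, and dequantization gives
+// (0 - 8) * 0.125 = -1.0.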
+void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
+ static const int qk = QK4_0;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+ float max = 0.0f;
+
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
+ if (amax < fabsf(v)) {
+ amax = fabsf(v);
+ max = v;
+ }
+ }
+
+ const float d = max / -8;
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = x[i*qk + 0 + j]*id;
+ const float x1 = x[i*qk + qk/2 + j]*id;
+
+ const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
+ const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
+
+ y[i].qs[j] = xi0;
+ y[i].qs[j] |= xi1 << 4;
+ }
+ }
+}
+
+void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q4_0_reference(x, y, k);
+}
+
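+// q4_1: asymmetric variant of q4_0, x ~= d*q + m with d = (max - min)/15 and
+// m = min, so q in [0, 15] covers the full [min, max] range of the block.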
+void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
+ const int qk = QK4_1;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ float min = FLT_MAX;
+ float max = -FLT_MAX;
+
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
+
+ if (v < min) min = v;
+ if (v > max) max = v;
+ }
+
+ const float d = (max - min) / ((1 << 4) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+ y[i].m = GGML_FP32_TO_FP16(min);
+
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = (x[i*qk + 0 + j] - min)*id;
+ const float x1 = (x[i*qk + qk/2 + j] - min)*id;
+
+ const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
+ const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
+
+ y[i].qs[j] = xi0;
+ y[i].qs[j] |= xi1 << 4;
+ }
+ }
+}
+
+void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q4_1_reference(x, y, k);
+}
+
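+// q5_0: like q4_0 but with 5-bit quants, x ~= d * (q - 16). The low 4 bits of
+// each quant go into the qs nibbles; the 32 fifth bits are packed into the
+// 32-bit qh field, bit j for the first half of the block and bit j + 16 for
+// the second half.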
+void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
+ static const int qk = QK5_0;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+ float max = 0.0f;
+
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
+ if (amax < fabsf(v)) {
+ amax = fabsf(v);
+ max = v;
+ }
+ }
+
+ const float d = max / -16;
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ uint32_t qh = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = x[i*qk + 0 + j]*id;
+ const float x1 = x[i*qk + qk/2 + j]*id;
+
+ const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
+ const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
+
+ y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
+
+ // get the 5th bit and store it in qh at the right position
+ qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
+ }
+
+ memcpy(&y[i].qh, &qh, sizeof(qh));
+ }
+}
+
+void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q5_0_reference(x, y, k);
+}
+
+void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
+ const int qk = QK5_1;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ float min = FLT_MAX;
+ float max = -FLT_MAX;
+
+ for (int j = 0; j < qk; j++) {
+ const float v = x[i*qk + j];
+
+ if (v < min) min = v;
+ if (v > max) max = v;
+ }
+
+ const float d = (max - min) / ((1 << 5) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+ y[i].m = GGML_FP32_TO_FP16(min);
+
+ uint32_t qh = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const float x0 = (x[i*qk + 0 + j] - min)*id;
+ const float x1 = (x[i*qk + qk/2 + j] - min)*id;
+
+ const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
+ const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
+
+ y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
+
+ // get the 5th bit and store it in qh at the right position
+ qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
+ }
+
+ memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
+ }
+}
+
+void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q5_1_reference(x, y, k);
+}
+
+// reference implementation for deterministic creation of model files
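+// q8_0: symmetric 8-bit quantization, d = amax/127 and q = round(x/d), so
+// q stays in [-127, 127] and x ~= d * q.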
+void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
+ assert(k % QK8_0 == 0);
+ const int nb = k / QK8_0;
+
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+
+ for (int j = 0; j < QK8_0; j++) {
+ const float v = x[i*QK8_0 + j];
+ amax = MAX(amax, fabsf(v));
+ }
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ for (int j = 0; j < QK8_0; ++j) {
+ const float x0 = x[i*QK8_0 + j]*id;
+
+ y[i].qs[j] = roundf(x0);
+ }
+ }
+}
+
+void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
+ assert(QK8_0 == 32);
+ assert(k % QK8_0 == 0);
+ const int nb = k / QK8_0;
+
+ block_q8_0 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ for (int i = 0; i < nb; i++) {
+ float32x4_t srcv [8];
+ float32x4_t asrcv[8];
+ float32x4_t amaxv[8];
+
+ for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
+
+ for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
+
+ const float amax = vmaxvq_f32(amaxv[0]);
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ for (int j = 0; j < 8; j++) {
+ const float32x4_t v = vmulq_n_f32(srcv[j], id);
+ const int32x4_t vi = vcvtnq_s32_f32(v);
+
+ y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
+ y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
+ y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
+ y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
+ }
+ }
+#elif defined(__wasm_simd128__)
+ for (int i = 0; i < nb; i++) {
+ v128_t srcv [8];
+ v128_t asrcv[8];
+ v128_t amaxv[8];
+
+ for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
+
+ for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
+
+ const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
+ wasm_f32x4_extract_lane(amaxv[0], 1)),
+ MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
+ wasm_f32x4_extract_lane(amaxv[0], 3)));
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ for (int j = 0; j < 8; j++) {
+ const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
+ const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
+
+ y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
+ y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
+ y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
+ y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
+ }
+ }
+#elif defined(__AVX2__) || defined(__AVX__)
+ for (int i = 0; i < nb; i++) {
+ // Load elements into 4 AVX vectors
+ __m256 v0 = _mm256_loadu_ps( x );
+ __m256 v1 = _mm256_loadu_ps( x + 8 );
+ __m256 v2 = _mm256_loadu_ps( x + 16 );
+ __m256 v3 = _mm256_loadu_ps( x + 24 );
+ x += 32;
+
+ // Compute max(abs(e)) for the block
+ const __m256 signBit = _mm256_set1_ps( -0.0f );
+ __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
+
+ __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
+ max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
+ max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
+ const float maxScalar = _mm_cvtss_f32( max4 );
+
+ // Quantize these floats
+ const float d = maxScalar / 127.f;
+ y[i].d = GGML_FP32_TO_FP16(d);
+ const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
+ const __m256 mul = _mm256_set1_ps( id );
+
+ // Apply the multiplier
+ v0 = _mm256_mul_ps( v0, mul );
+ v1 = _mm256_mul_ps( v1, mul );
+ v2 = _mm256_mul_ps( v2, mul );
+ v3 = _mm256_mul_ps( v3, mul );
+
+ // Round to nearest integer
+ v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
+ v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
+ v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
+ v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
+
+ // Convert floats to integers
+ __m256i i0 = _mm256_cvtps_epi32( v0 );
+ __m256i i1 = _mm256_cvtps_epi32( v1 );
+ __m256i i2 = _mm256_cvtps_epi32( v2 );
+ __m256i i3 = _mm256_cvtps_epi32( v3 );
+
+#if defined(__AVX2__)
+ // Convert int32 to int16
+ i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
+ i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
+ // Convert int16 to int8
+ i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+
+ // We got our precious signed bytes, but the order is now wrong
+ // These AVX2 pack instructions process 16-byte pieces independently
+ // The following instruction fixes the order
+ const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
+ i0 = _mm256_permutevar8x32_epi32( i0, perm );
+
+ _mm256_storeu_si256((__m256i *)y[i].qs, i0);
+#else
+ // AVX lacks the necessary 256-bit integer instructions,
+ // so we split the registers in half and use the SSE analogs of the AVX2 calls
+ __m128i ni0 = _mm256_castsi256_si128( i0 );
+ __m128i ni1 = _mm256_extractf128_si256( i0, 1);
+ __m128i ni2 = _mm256_castsi256_si128( i1 );
+ __m128i ni3 = _mm256_extractf128_si256( i1, 1);
+ __m128i ni4 = _mm256_castsi256_si128( i2 );
+ __m128i ni5 = _mm256_extractf128_si256( i2, 1);
+ __m128i ni6 = _mm256_castsi256_si128( i3 );
+ __m128i ni7 = _mm256_extractf128_si256( i3, 1);
+
+ // Convert int32 to int16
+ ni0 = _mm_packs_epi32( ni0, ni1 );
+ ni2 = _mm_packs_epi32( ni2, ni3 );
+ ni4 = _mm_packs_epi32( ni4, ni5 );
+ ni6 = _mm_packs_epi32( ni6, ni7 );
+ // Convert int16 to int8
+ ni0 = _mm_packs_epi16( ni0, ni2 );
+ ni4 = _mm_packs_epi16( ni4, ni6 );
+
+ _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
+ _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
+#endif
+ }
+#elif defined(__riscv_v_intrinsic)
+
+ size_t vl = __riscv_vsetvl_e32m4(QK8_0);
+
+ for (int i = 0; i < nb; i++) {
+ // load elements
+ vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_0, vl);
+
+ vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
+ vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0f, vl);
+ vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
+ float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_FP32_TO_FP16(d);
+
+ vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
+
+ // convert to integer
+ vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
+ vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
+
+ // store result
+ __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
+ }
+#else
+ GGML_UNUSED(nb);
+ // scalar
+ quantize_row_q8_0_reference(x, y, k);
+#endif
+}
+
+// reference implementation for deterministic creation of model files
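+// q8_1 additionally stores s = d * sum(q): dot products against the offset
+// formats (q4_1/q5_1, where x ~= d*q + m) can then fold the per-block m term
+// in as m * s instead of re-summing the individual quants.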
+void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
+ assert(QK8_1 == 32);
+ assert(k % QK8_1 == 0);
+ const int nb = k / QK8_1;
+
+ for (int i = 0; i < nb; i++) {
+ float amax = 0.0f; // absolute max
+
+ for (int j = 0; j < QK8_1; j++) {
+ const float v = x[i*QK8_1 + j];
+ amax = MAX(amax, fabsf(v));
+ }
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = d;
+
+ int sum = 0;
+
+ for (int j = 0; j < QK8_1/2; ++j) {
+ const float v0 = x[i*QK8_1 + j]*id;
+ const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;
+
+ y[i].qs[ j] = roundf(v0);
+ y[i].qs[QK8_1/2 + j] = roundf(v1);
+
+ sum += y[i].qs[ j];
+ sum += y[i].qs[QK8_1/2 + j];
+ }
+
+ y[i].s = sum*d;
+ }
+}
+
+void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
+ assert(k % QK8_1 == 0);
+ const int nb = k / QK8_1;
+
+ block_q8_1 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ for (int i = 0; i < nb; i++) {
+ float32x4_t srcv [8];
+ float32x4_t asrcv[8];
+ float32x4_t amaxv[8];
+
+ for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
+
+ for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
+
+ const float amax = vmaxvq_f32(amaxv[0]);
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = d;
+
+ int32x4_t accv = vdupq_n_s32(0);
+
+ for (int j = 0; j < 8; j++) {
+ const float32x4_t v = vmulq_n_f32(srcv[j], id);
+ const int32x4_t vi = vcvtnq_s32_f32(v);
+
+ y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
+ y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
+ y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
+ y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
+
+ accv = vaddq_s32(accv, vi);
+ }
+
+ y[i].s = d * vaddvq_s32(accv);
+ }
+#elif defined(__wasm_simd128__)
+ for (int i = 0; i < nb; i++) {
+ v128_t srcv [8];
+ v128_t asrcv[8];
+ v128_t amaxv[8];
+
+ for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
+
+ for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
+
+ const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
+ wasm_f32x4_extract_lane(amaxv[0], 1)),
+ MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
+ wasm_f32x4_extract_lane(amaxv[0], 3)));
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = d;
+
+ v128_t accv = wasm_i32x4_splat(0);
+
+ for (int j = 0; j < 8; j++) {
+ const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
+ const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
+
+ y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
+ y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
+ y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
+ y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
+
+ accv = wasm_i32x4_add(accv, vi);
+ }
+
+ y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
+ wasm_i32x4_extract_lane(accv, 1) +
+ wasm_i32x4_extract_lane(accv, 2) +
+ wasm_i32x4_extract_lane(accv, 3));
+ }
+#elif defined(__AVX2__) || defined(__AVX__)
+ for (int i = 0; i < nb; i++) {
+ // Load elements into 4 AVX vectors
+ __m256 v0 = _mm256_loadu_ps( x );
+ __m256 v1 = _mm256_loadu_ps( x + 8 );
+ __m256 v2 = _mm256_loadu_ps( x + 16 );
+ __m256 v3 = _mm256_loadu_ps( x + 24 );
+ x += 32;
+
+ // Compute max(abs(e)) for the block
+ const __m256 signBit = _mm256_set1_ps( -0.0f );
+ __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
+ maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
+
+ __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
+ max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
+ max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
+ const float maxScalar = _mm_cvtss_f32( max4 );
+
+ // Quantize these floats
+ const float d = maxScalar / 127.f;
+ y[i].d = d;
+ const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
+ const __m256 mul = _mm256_set1_ps( id );
+
+ // Apply the multiplier
+ v0 = _mm256_mul_ps( v0, mul );
+ v1 = _mm256_mul_ps( v1, mul );
+ v2 = _mm256_mul_ps( v2, mul );
+ v3 = _mm256_mul_ps( v3, mul );
+
+ // Round to nearest integer
+ v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
+ v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
+ v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
+ v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
+
+ // Convert floats to integers
+ __m256i i0 = _mm256_cvtps_epi32( v0 );
+ __m256i i1 = _mm256_cvtps_epi32( v1 );
+ __m256i i2 = _mm256_cvtps_epi32( v2 );
+ __m256i i3 = _mm256_cvtps_epi32( v3 );
+
+#if defined(__AVX2__)
+ // Compute the sum of the quants and set y[i].s
+ y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));
+
+ // Convert int32 to int16
+ i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
+ i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
+ // Convert int16 to int8
+ i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
+
+ // We got our precious signed bytes, but the order is now wrong
+ // These AVX2 pack instructions process 16-byte pieces independently
+ // The following instruction fixes the order
+ const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
+ i0 = _mm256_permutevar8x32_epi32( i0, perm );
+
+ _mm256_storeu_si256((__m256i *)y[i].qs, i0);
+#else
+ // AVX lacks the necessary 256-bit integer instructions,
+ // so we split the registers in half and use the SSE analogs of the AVX2 calls
+ __m128i ni0 = _mm256_castsi256_si128( i0 );
+ __m128i ni1 = _mm256_extractf128_si256( i0, 1);
+ __m128i ni2 = _mm256_castsi256_si128( i1 );
+ __m128i ni3 = _mm256_extractf128_si256( i1, 1);
+ __m128i ni4 = _mm256_castsi256_si128( i2 );
+ __m128i ni5 = _mm256_extractf128_si256( i2, 1);
+ __m128i ni6 = _mm256_castsi256_si128( i3 );
+ __m128i ni7 = _mm256_extractf128_si256( i3, 1);
+
+ // Compute the sum of the quants and set y[i].s
+ const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
+ const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
+ y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));
+
+ // Convert int32 to int16
+ ni0 = _mm_packs_epi32( ni0, ni1 );
+ ni2 = _mm_packs_epi32( ni2, ni3 );
+ ni4 = _mm_packs_epi32( ni4, ni5 );
+ ni6 = _mm_packs_epi32( ni6, ni7 );
+ // Convert int16 to int8
+ ni0 = _mm_packs_epi16( ni0, ni2 );
+ ni4 = _mm_packs_epi16( ni4, ni6 );
+
+ _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
+ _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
+#endif
+ }
+#elif defined(__riscv_v_intrinsic)
+
+ size_t vl = __riscv_vsetvl_e32m4(QK8_1);
+
+ for (int i = 0; i < nb; i++) {
+ // load elements
+ vfloat32m4_t v_x = __riscv_vle32_v_f32m4(x+i*QK8_1, vl);
+
+ vfloat32m4_t vfabs = __riscv_vfabs_v_f32m4(v_x, vl);
+ vfloat32m1_t tmp = __riscv_vfmv_v_f_f32m1(0.0, vl);
+ vfloat32m1_t vmax = __riscv_vfredmax_vs_f32m4_f32m1(vfabs, tmp, vl);
+ float amax = __riscv_vfmv_f_s_f32m1_f32(vmax);
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = d;
+
+ vfloat32m4_t x0 = __riscv_vfmul_vf_f32m4(v_x, id, vl);
+
+ // convert to integer
+ vint16m2_t vi = __riscv_vfncvt_x_f_w_i16m2(x0, vl);
+ vint8m1_t vs = __riscv_vncvt_x_x_w_i8m1(vi, vl);
+
+ // store result
+ __riscv_vse8_v_i8m1(y[i].qs , vs, vl);
+
+ // compute sum for y[i].s
+ vint16m1_t tmp2 = __riscv_vmv_v_x_i16m1(0, vl);
+ vint16m1_t vwrs = __riscv_vwredsum_vs_i8m1_i16m1(vs, tmp2, vl);
+
+ // set y[i].s
+ int sum = __riscv_vmv_x_s_i16m1_i16(vwrs);
+ y[i].s = sum*d;
+ }
+#else
+ GGML_UNUSED(nb);
+ // scalar
+ quantize_row_q8_1_reference(x, y, k);
+#endif
+}
+
+void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
+ static const int qk = QK4_0;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+
+ for (int j = 0; j < qk/2; ++j) {
+ const int x0 = (x[i].qs[j] & 0x0F) - 8;
+ const int x1 = (x[i].qs[j] >> 4) - 8;
+
+ y[i*qk + j + 0 ] = x0*d;
+ y[i*qk + j + qk/2] = x1*d;
+ }
+ }
+}
+
+void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
+ static const int qk = QK4_1;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const float m = GGML_FP16_TO_FP32(x[i].m);
+
+ for (int j = 0; j < qk/2; ++j) {
+ const int x0 = (x[i].qs[j] & 0x0F);
+ const int x1 = (x[i].qs[j] >> 4);
+
+ y[i*qk + j + 0 ] = x0*d + m;
+ y[i*qk + j + qk/2] = x1*d + m;
+ }
+ }
+}
+
+void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
+ static const int qk = QK5_0;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+
+ uint32_t qh;
+ memcpy(&qh, x[i].qh, sizeof(qh));
+
+ for (int j = 0; j < qk/2; ++j) {
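+ // the fifth bit of the first-half value sits at qh bit j (shift it up to
+ // position 4); the second-half bit sits at qh bit j + 16, so shifting
+ // right by j + 12 lands it directly on bit 4 (mask 0x10)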
+ const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
+
+ const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
+ const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
+
+ y[i*qk + j + 0 ] = x0*d;
+ y[i*qk + j + qk/2] = x1*d;
+ }
+ }
+}
+
+void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
+ static const int qk = QK5_1;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const float m = GGML_FP16_TO_FP32(x[i].m);
+
+ uint32_t qh;
+ memcpy(&qh, x[i].qh, sizeof(qh));
+
+ for (int j = 0; j < qk/2; ++j) {
+ const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
+
+ const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
+ const int x1 = (x[i].qs[j] >> 4) | xh_1;
+
+ y[i*qk + j + 0 ] = x0*d + m;
+ y[i*qk + j + qk/2] = x1*d + m;
+ }
+ }
+}
+
+void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k) {
+ static const int qk = QK8_0;
+
+ assert(k % qk == 0);
+
+ const int nb = k / qk;
+
+ for (int i = 0; i < nb; i++) {
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+
+ for (int j = 0; j < qk; ++j) {
+ y[i*qk + j] = x[i].qs[j]*d;
+ }
+ }
+}
+
+//
+// 2-6 bit quantization in super-blocks
+//
+
+//
+// ===================== Helper functions
+//
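+// nearest_int rounds via the IEEE-754 float layout: adding 12582912.0f
+// (= 1.5 * 2^23) pushes the value into a range where the float's ulp is 1, so
+// the hardware rounds to an integer in the mantissa, which then holds
+// 2^22 + round(fval); the mask-and-subtract below recovers the result.
+// Example: fval = 2.4 -> 12582914.4 rounds to 12582914.0, the mantissa bits
+// are 0x400002, and 0x400002 - 0x400000 = 2.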
+static inline int nearest_int(float fval) {
+ assert(fval <= 4194303.f);
+ float val = fval + 12582912.f;
+ int i; memcpy(&i, &val, sizeof(int));
+ return (i & 0x007fffff) - 0x00400000;
+}
+
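+// make_qx_quants maps x to integers l in [-nmax, nmax-1] (stored with a +nmax
+// bias) and returns the scale. For rmse_type != 0 the scale is a weighted
+// least-squares fit, scale = sum(w*x*l) / sum(w*l*l) with w = x^2 or w = 1,
+// refined by a small grid search over candidate inverse scales.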
+static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, int rmse_type) {
+ float max = 0;
+ float amax = 0;
+ for (int i = 0; i < n; ++i) {
+ float ax = fabsf(x[i]);
+ if (ax > amax) { amax = ax; max = x[i]; }
+ }
+ if (amax < 1e-30f) { // all zero
+ for (int i = 0; i < n; ++i) {
+ L[i] = 0;
+ }
+ return 0.f;
+ }
+ float iscale = -nmax / max;
+ if (rmse_type == 0) {
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
+ }
+ return 1/iscale;
+ }
+ bool return_early = false;
+ if (rmse_type < 0) {
+ rmse_type = -rmse_type;
+ return_early = true;
+ }
+ int weight_type = rmse_type%2;
+ float sumlx = 0;
+ float suml2 = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ l = MAX(-nmax, MIN(nmax-1, l));
+ L[i] = l + nmax;
+ float w = weight_type == 1 ? x[i] * x[i] : 1;
+ sumlx += w*x[i]*l;
+ suml2 += w*l*l;
+ }
+ float scale = sumlx/suml2;
+ if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale;
+ float best = scale * sumlx;
+ for (int is = -9; is <= 9; ++is) {
+ if (is == 0) {
+ continue;
+ }
+ iscale = -(nmax + 0.1f*is) / max;
+ sumlx = suml2 = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ l = MAX(-nmax, MIN(nmax-1, l));
+ float w = weight_type == 1 ? x[i] * x[i] : 1;
+ sumlx += w*x[i]*l;
+ suml2 += w*l*l;
+ }
+ if (suml2 > 0 && sumlx*sumlx > best*suml2) {
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
+ }
+ scale = sumlx/suml2; best = scale*sumlx;
+ }
+ }
+ return scale;
+}
+
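+// make_q3_quants does the same with fixed weights w = x^2; with do_rmse it
+// refines the rounded solution by coordinate descent, re-picking each L[i]
+// whenever that increases sum(w*x*l)^2 / sum(w*l^2), for up to 5 sweeps.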
+static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t * restrict L, bool do_rmse) {
+ float max = 0;
+ float amax = 0;
+ for (int i = 0; i < n; ++i) {
+ float ax = fabsf(x[i]);
+ if (ax > amax) { amax = ax; max = x[i]; }
+ }
+ if (!amax) { // all zero
+ for (int i = 0; i < n; ++i) { L[i] = 0; }
+ return 0.f;
+ }
+ float iscale = -nmax / max;
+ if (do_rmse) {
+ float sumlx = 0;
+ float suml2 = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ l = MAX(-nmax, MIN(nmax-1, l));
+ L[i] = l;
+ float w = x[i]*x[i];
+ sumlx += w*x[i]*l;
+ suml2 += w*l*l;
+ }
+ for (int itry = 0; itry < 5; ++itry) {
+ int n_changed = 0;
+ for (int i = 0; i < n; ++i) {
+ float w = x[i]*x[i];
+ float slx = sumlx - w*x[i]*L[i];
+ if (slx > 0) {
+ float sl2 = suml2 - w*L[i]*L[i];
+ int new_l = nearest_int(x[i] * sl2 / slx);
+ new_l = MAX(-nmax, MIN(nmax-1, new_l));
+ if (new_l != L[i]) {
+ slx += w*x[i]*new_l;
+ sl2 += w*new_l*new_l;
+ if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) {
+ L[i] = new_l; sumlx = slx; suml2 = sl2;
+ ++n_changed;
+ }
+ }
+ }
+ }
+ if (!n_changed) {
+ break;
+ }
+ }
+ for (int i = 0; i < n; ++i) {
+ L[i] += nmax;
+ }
+ return sumlx / suml2;
+ }
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ l = MAX(-nmax, MIN(nmax-1, l));
+ L[i] = l + nmax;
+ }
+ return 1/iscale;
+}
+
+static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
+ int ntry, float alpha) {
+ float min = x[0];
+ float max = x[0];
+ for (int i = 1; i < n; ++i) {
+ if (x[i] < min) min = x[i];
+ if (x[i] > max) max = x[i];
+ }
+ if (max == min) {
+ for (int i = 0; i < n; ++i) L[i] = 0;
+ *the_min = 0;
+ return 0.f;
+ }
+ if (min > 0) min = 0;
+ float iscale = nmax/(max - min);
+ float scale = 1/iscale;
+ for (int itry = 0; itry < ntry; ++itry) {
+ float sumlx = 0; int suml2 = 0;
+ bool did_change = false;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale*(x[i] - min));
+ l = MAX(0, MIN(nmax, l));
+ if (l != L[i]) {
+ L[i] = l;
+ did_change = true;
+ }
+ sumlx += (x[i] - min)*l;
+ suml2 += l*l;
+ }
+ scale = sumlx/suml2;
+ float sum = 0;
+ for (int i = 0; i < n; ++i) {
+ sum += x[i] - scale*L[i];
+ }
+ min = alpha*min + (1 - alpha)*sum/n;
+ if (min > 0) min = 0;
+ iscale = 1/scale;
+ if (!did_change) break;
+ }
+ *the_min = -min;
+ return scale;
+}
+
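+// make_qkx2_quants fits x ~= scale * L + min (L in [0, nmax], min clamped to
+// <= 0) by weighted least squares over a grid of candidate scales: with
+// D = sum(w)*sum(w*l^2) - sum(w*l)^2, the closed-form solution is
+//   scale = (sum(w)*sum(w*l*x) - sum(w*x)*sum(w*l)) / D
+//   min   = (sum(w*l^2)*sum(w*x) - sum(w*l)*sum(w*l*x)) / D
+// and the candidate with the smallest (optionally absolute) error wins.
+// Note that *the_min returns -min, i.e. a non-negative offset.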
+static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
+ uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
+ float rmin, float rdelta, int nstep, bool use_mad) {
+ float min = x[0];
+ float max = x[0];
+ float sum_w = weights[0];
+ float sum_x = sum_w * x[0];
+ for (int i = 1; i < n; ++i) {
+ if (x[i] < min) min = x[i];
+ if (x[i] > max) max = x[i];
+ float w = weights[i];
+ sum_w += w;
+ sum_x += w * x[i];
+ }
+ if (min > 0) min = 0;
+ if (max == min) {
+ for (int i = 0; i < n; ++i) L[i] = 0;
+ *the_min = -min;
+ return 0.f;
+ }
+ float iscale = nmax/(max - min);
+ float scale = 1/iscale;
+ float best_mad = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale*(x[i] - min));
+ L[i] = MAX(0, MIN(nmax, l));
+ float diff = scale * L[i] + min - x[i];
+ diff = use_mad ? fabsf(diff) : diff * diff;
+ float w = weights[i];
+ best_mad += w * diff;
+ }
+ if (nstep < 1) {
+ *the_min = -min;
+ return scale;
+ }
+ for (int is = 0; is <= nstep; ++is) {
+ iscale = (rmin + rdelta*is + nmax)/(max - min);
+ float sum_l = 0, sum_l2 = 0, sum_xl = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale*(x[i] - min));
+ l = MAX(0, MIN(nmax, l));
+ Laux[i] = l;
+ float w = weights[i];
+ sum_l += w*l;
+ sum_l2 += w*l*l;
+ sum_xl += w*l*x[i];
+ }
+ float D = sum_w * sum_l2 - sum_l * sum_l;
+ if (D > 0) {
+ float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
+ float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
+ if (this_min > 0) {
+ this_min = 0;
+ this_scale = sum_xl / sum_l2;
+ }
+ float mad = 0;
+ for (int i = 0; i < n; ++i) {
+ float diff = this_scale * Laux[i] + this_min - x[i];
+ diff = use_mad ? fabsf(diff) : diff * diff;
+ float w = weights[i];
+ mad += w * diff;
+ }
+ if (mad < best_mad) {
+ for (int i = 0; i < n; ++i) {
+ L[i] = Laux[i];
+ }
+ best_mad = mad;
+ scale = this_scale;
+ min = this_min;
+ }
+ }
+ }
+ *the_min = -min;
+ return scale;
+}
+
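+// get_scale_min_k4 unpacks the 12-byte layout q4_K/q5_K use for eight 6-bit
+// scales and eight 6-bit mins: blocks 0-3 keep their full 6 bits in
+// scales[0..3] (scale) and scales[4..7] (min); blocks 4-7 put their low 4 bits
+// in the low/high nibbles of scales[8..11] and their top 2 bits in the spare
+// high bits of scales[0..7].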
+#if QK_K == 256
+static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
+ if (j < 4) {
+ *d = q[j] & 63; *m = q[j + 4] & 63;
+ } else {
+ *d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
+ *m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
+ }
+}
+#endif
+
+//========================= 2-bit (de)-quantization
+
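+// q2_K: each super-block of QK_K values is split into 16-value sub-blocks.
+// Every sub-block gets a 4-bit scale and a 4-bit min (one byte per sub-block
+// in scales[]), applied on top of the fp16 super-block factors:
+// x ~= d * sc * q - dmin * m, with q in [0, 3].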
+void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ uint8_t L[QK_K];
+ uint8_t Laux[16];
+ float weights[16];
+ float mins[QK_K/16];
+ float scales[QK_K/16];
+
+ const float q4scale = 15.f;
+
+ for (int i = 0; i < nb; i++) {
+ float max_scale = 0; // since we subtract the min, scales are always positive
+ float max_min = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]);
+ scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true);
+ float scale = scales[j];
+ if (scale > max_scale) {
+ max_scale = scale;
+ }
+ float min = mins[j];
+ if (min > max_min) {
+ max_min = min;
+ }
+ }
+
+ if (max_scale > 0) {
+ float iscale = q4scale/max_scale;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int l = nearest_int(iscale*scales[j]);
+ y[i].scales[j] = l;
+ }
+ y[i].d = GGML_FP32_TO_FP16(max_scale/q4scale);
+ } else {
+ for (int j = 0; j < QK_K/16; ++j) y[i].scales[j] = 0;
+ y[i].d = GGML_FP32_TO_FP16(0.f);
+ }
+ if (max_min > 0) {
+ float iscale = q4scale/max_min;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int l = nearest_int(iscale*mins[j]);
+ y[i].scales[j] |= (l << 4);
+ }
+ y[i].dmin = GGML_FP32_TO_FP16(max_min/q4scale);
+ } else {
+ y[i].dmin = GGML_FP32_TO_FP16(0.f);
+ }
+ for (int j = 0; j < QK_K/16; ++j) {
+ const float d = GGML_FP16_TO_FP32(y[i].d) * (y[i].scales[j] & 0xF);
+ if (!d) continue;
+ const float dm = GGML_FP16_TO_FP32(y[i].dmin) * (y[i].scales[j] >> 4);
+ for (int ii = 0; ii < 16; ++ii) {
+ int l = nearest_int((x[16*j + ii] + dm)/d);
+ l = MAX(0, MIN(3, l));
+ L[16*j + ii] = l;
+ }
+ }
+
+#if QK_K == 256
+ for (int j = 0; j < QK_K; j += 128) {
+ for (int l = 0; l < 32; ++l) {
+ y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
+ }
+ }
+#else
+ for (int l = 0; l < 16; ++l) {
+ y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
+ }
+#endif
+
+ x += QK_K;
+
+ }
+}
+
+void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const float min = GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * q = x[i].qs;
+
+#if QK_K == 256
+ int is = 0;
+ float dl, ml;
+ for (int n = 0; n < QK_K; n += 128) {
+ int shift = 0;
+ for (int j = 0; j < 4; ++j) {
+
+ uint8_t sc = x[i].scales[is++];
+ dl = d * (sc & 0xF); ml = min * (sc >> 4);
+ for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l] >> shift) & 3)) - ml;
+
+ sc = x[i].scales[is++];
+ dl = d * (sc & 0xF); ml = min * (sc >> 4);
+ for (int l = 0; l < 16; ++l) *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3)) - ml;
+
+ shift += 2;
+ }
+ q += 32;
+ }
+#else
+ float dl1 = d * (x[i].scales[0] & 0xF), ml1 = min * (x[i].scales[0] >> 4);
+ float dl2 = d * (x[i].scales[1] & 0xF), ml2 = min * (x[i].scales[1] >> 4);
+ float dl3 = d * (x[i].scales[2] & 0xF), ml3 = min * (x[i].scales[2] >> 4);
+ float dl4 = d * (x[i].scales[3] & 0xF), ml4 = min * (x[i].scales[3] >> 4);
+ for (int l = 0; l < 16; ++l) {
+ y[l+ 0] = dl1 * ((int8_t)((q[l] >> 0) & 3)) - ml1;
+ y[l+16] = dl2 * ((int8_t)((q[l] >> 2) & 3)) - ml2;
+ y[l+32] = dl3 * ((int8_t)((q[l] >> 4) & 3)) - ml3;
+ y[l+48] = dl4 * ((int8_t)((q[l] >> 6) & 3)) - ml4;
+ }
+ y += QK_K;
+#endif
+ }
+}
+
+void quantize_row_q2_K(const float * restrict x, void * restrict vy, int k) {
+ quantize_row_q2_K_reference(x, vy, k);
+}
+
+size_t ggml_quantize_q2_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
+ (void)hist; // TODO: collect histograms
+
+ for (int j = 0; j < n; j += k) {
+ block_q2_K * restrict y = (block_q2_K *)dst + j/QK_K;
+ quantize_row_q2_K_reference(src + j, y, k);
+ }
+ return (n/QK_K*sizeof(block_q2_K));
+}
+
+//========================= 3-bit (de)-quantization
+
+void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ int8_t L[QK_K];
+ float scales[QK_K / 16];
+
+ for (int i = 0; i < nb; i++) {
+
+ float max_scale = 0;
+ float amax = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ scales[j] = make_q3_quants(16, 4, x + 16*j, L + 16*j, true);
+ float scale = fabsf(scales[j]);
+ if (scale > amax) {
+ amax = scale; max_scale = scales[j];
+ }
+ }
+
+#if QK_K == 256
+ memset(y[i].scales, 0, 12);
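+ // 16 six-bit scales are packed into 12 bytes: the low 4 bits of scales
+ // 0-7 and 8-15 fill the low and high nibbles of bytes 0-7, and the top
+ // 2 bits of scale j land in byte 8 + j%4 at bit offset 2*(j/4)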
+ if (max_scale) {
+ float iscale = -32.f/max_scale;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int8_t l = nearest_int(iscale*scales[j]);
+ l = MAX(-32, MIN(31, l)) + 32;
+ if (j < 8) {
+ y[i].scales[j] = l & 0xF;
+ } else {
+ y[i].scales[j-8] |= ((l & 0xF) << 4);
+ }
+ l >>= 4;
+ y[i].scales[j%4 + 8] |= (l << (2*(j/4)));
+ }
+ y[i].d = GGML_FP32_TO_FP16(1/iscale);
+ } else {
+ y[i].d = GGML_FP32_TO_FP16(0.f);
+ }
+
+ int8_t sc;
+ for (int j = 0; j < QK_K/16; ++j) {
+ sc = j < 8 ? y[i].scales[j] & 0xF : y[i].scales[j-8] >> 4;
+ sc = (sc | (((y[i].scales[8 + j%4] >> (2*(j/4))) & 3) << 4)) - 32;
+ float d = GGML_FP16_TO_FP32(y[i].d) * sc;
+ if (!d) {
+ continue;
+ }
+ for (int ii = 0; ii < 16; ++ii) {
+ int l = nearest_int(x[16*j + ii]/d);
+ l = MAX(-4, MIN(3, l));
+ L[16*j + ii] = l + 4;
+ }
+ }
+#else
+ if (max_scale) {
+ float iscale = -8.f/max_scale;
+ for (int j = 0; j < QK_K/16; j+=2) {
+ int l1 = nearest_int(iscale*scales[j]);
+ l1 = 8 + MAX(-8, MIN(7, l1));
+ int l2 = nearest_int(iscale*scales[j+1]);
+ l2 = 8 + MAX(-8, MIN(7, l2));
+ y[i].scales[j/2] = l1 | (l2 << 4);
+ }
+ y[i].d = GGML_FP32_TO_FP16(1/iscale);
+ } else {
+ for (int j = 0; j < QK_K/16; j+=2) {
+ y[i].scales[j/2] = 0;
+ }
+ y[i].d = GGML_FP32_TO_FP16(0.f);
+ }
+ for (int j = 0; j < QK_K/16; ++j) {
+ int s = j%2 == 0 ? y[i].scales[j/2] & 0xF : y[i].scales[j/2] >> 4;
+ float d = GGML_FP16_TO_FP32(y[i].d) * (s - 8);
+ if (!d) {
+ continue;
+ }
+ for (int ii = 0; ii < 16; ++ii) {
+ int l = nearest_int(x[16*j + ii]/d);
+ l = MAX(-4, MIN(3, l));
+ L[16*j + ii] = l + 4;
+ }
+ }
+#endif
+
+ memset(y[i].hmask, 0, QK_K/8);
+ // We put the high bit of the first QK_K/8 quants into bit 0, the next QK_K/8 into bit 1, etc.
+ int m = 0;
+ uint8_t hm = 1;
+ for (int j = 0; j < QK_K; ++j) {
+ if (L[j] > 3) {
+ y[i].hmask[m] |= hm;
+ L[j] -= 4;
+ }
+ if (++m == QK_K/8) {
+ m = 0; hm <<= 1;
+ }
+ }
+#if QK_K == 256
+ for (int j = 0; j < QK_K; j += 128) {
+ for (int l = 0; l < 32; ++l) {
+ y[i].qs[j/4 + l] = L[j + l] | (L[j + l + 32] << 2) | (L[j + l + 64] << 4) | (L[j + l + 96] << 6);
+ }
+ }
+#else
+ for (int l = 0; l < 16; ++l) {
+ y[i].qs[l] = L[l] | (L[l + 16] << 2) | (L[l + 32] << 4) | (L[l + 48] << 6);
+ }
+#endif
+
+ x += QK_K;
+ }
+}
+
+#if QK_K == 256
+void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ const uint32_t kmask1 = 0x03030303;
+ const uint32_t kmask2 = 0x0f0f0f0f;
+
+ uint32_t aux[4];
+ const int8_t * scales = (const int8_t*)aux;
+
+ for (int i = 0; i < nb; i++) {
+
+ const float d_all = GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q = x[i].qs;
+ const uint8_t * restrict hm = x[i].hmask;
+ uint8_t m = 1;
+
+ memcpy(aux, x[i].scales, 12);
+ uint32_t tmp = aux[2];
+ aux[2] = ((aux[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
+ aux[3] = ((aux[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
+ aux[0] = (aux[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
+ aux[1] = (aux[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
+
+ int is = 0;
+ float dl;
+ for (int n = 0; n < QK_K; n += 128) {
+ int shift = 0;
+ for (int j = 0; j < 4; ++j) {
+
+ dl = d_all * (scales[is++] - 32);
+ for (int l = 0; l < 16; ++l) {
+ *y++ = dl * ((int8_t)((q[l+ 0] >> shift) & 3) - ((hm[l+ 0] & m) ? 0 : 4));
+ }
+
+ dl = d_all * (scales[is++] - 32);
+ for (int l = 0; l < 16; ++l) {
+ *y++ = dl * ((int8_t)((q[l+16] >> shift) & 3) - ((hm[l+16] & m) ? 0 : 4));
+ }
+
+ shift += 2;
+ m <<= 1;
+ }
+ q += 32;
+ }
+
+ }
+}
+#else
+void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ assert(QK_K == 64);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ const float d_all = GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q = x[i].qs;
+ const uint8_t * restrict hm = x[i].hmask;
+
+ const float d1 = d_all * ((x[i].scales[0] & 0xF) - 8);
+ const float d2 = d_all * ((x[i].scales[0] >> 4) - 8);
+ const float d3 = d_all * ((x[i].scales[1] & 0xF) - 8);
+ const float d4 = d_all * ((x[i].scales[1] >> 4) - 8);
+
+ for (int l=0; l<8; ++l) {
+ uint8_t h = hm[l];
+ y[l+ 0] = d1 * ((int8_t)((q[l+0] >> 0) & 3) - ((h & 0x01) ? 0 : 4));
+ y[l+ 8] = d1 * ((int8_t)((q[l+8] >> 0) & 3) - ((h & 0x02) ? 0 : 4));
+ y[l+16] = d2 * ((int8_t)((q[l+0] >> 2) & 3) - ((h & 0x04) ? 0 : 4));
+ y[l+24] = d2 * ((int8_t)((q[l+8] >> 2) & 3) - ((h & 0x08) ? 0 : 4));
+ y[l+32] = d3 * ((int8_t)((q[l+0] >> 4) & 3) - ((h & 0x10) ? 0 : 4));
+ y[l+40] = d3 * ((int8_t)((q[l+8] >> 4) & 3) - ((h & 0x20) ? 0 : 4));
+ y[l+48] = d4 * ((int8_t)((q[l+0] >> 6) & 3) - ((h & 0x40) ? 0 : 4));
+ y[l+56] = d4 * ((int8_t)((q[l+8] >> 6) & 3) - ((h & 0x80) ? 0 : 4));
+ }
+ y += QK_K;
+ }
+}
+#endif
+
+void quantize_row_q3_K(const float * restrict x, void * restrict vy, int k) {
+ quantize_row_q3_K_reference(x, vy, k);
+}
+
+size_t ggml_quantize_q3_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
+ (void)hist; // TODO: collect histograms
+
+ for (int j = 0; j < n; j += k) {
+ block_q3_K * restrict y = (block_q3_K *)dst + j/QK_K;
+ quantize_row_q3_K_reference(src + j, y, k);
+ }
+ return (n/QK_K*sizeof(block_q3_K));
+}
+
+// ====================== 4-bit (de)-quantization
+
+void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ uint8_t L[QK_K];
+ uint8_t Laux[32];
+ float weights[32];
+ float mins[QK_K/32];
+ float scales[QK_K/32];
+
+ for (int i = 0; i < nb; i++) {
+
+ float max_scale = 0; // since we subtract the min, scales are always positive
+ float max_min = 0;
+ for (int j = 0; j < QK_K/32; ++j) {
+ //scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
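+ // weight each value by its magnitude plus the sub-block RMS, so the
+ // least-squares fit favors the larger entries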
+ float sum_x2 = 0;
+ for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
+ float av_x = sqrtf(sum_x2/32);
+ for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
+ scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
+ float scale = scales[j];
+ if (scale > max_scale) {
+ max_scale = scale;
+ }
+ float min = mins[j];
+ if (min > max_min) {
+ max_min = min;
+ }
+ }
+
+#if QK_K == 256
+ float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
+ float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
+ for (int j = 0; j < QK_K/32; ++j) {
+ uint8_t ls = nearest_int(inv_scale*scales[j]);
+ uint8_t lm = nearest_int(inv_min*mins[j]);
+ ls = MIN(63, ls);
+ lm = MIN(63, lm);
+ if (j < 4) {
+ y[i].scales[j] = ls;
+ y[i].scales[j+4] = lm;
+ } else {
+ y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
+ y[i].scales[j-4] |= ((ls >> 4) << 6);
+ y[i].scales[j-0] |= ((lm >> 4) << 6);
+ }
+ }
+ y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
+ y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
+
+ uint8_t sc, m;
+ for (int j = 0; j < QK_K/32; ++j) {
+ get_scale_min_k4(j, y[i].scales, &sc, &m);
+ const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
+ if (!d) continue;
+ const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
+ for (int ii = 0; ii < 32; ++ii) {
+ int l = nearest_int((x[32*j + ii] + dm)/d);
+ l = MAX(0, MIN(15, l));
+ L[32*j + ii] = l;
+ }
+ }
+#else
+ const float s_factor = 15.f;
+ float inv_scale = max_scale > 0 ? s_factor/max_scale : 0.f;
+ float inv_min = max_min > 0 ? s_factor/max_min : 0.f;
+ int d1 = nearest_int(inv_scale*scales[0]);
+ int m1 = nearest_int(inv_min*mins[0]);
+ int d2 = nearest_int(inv_scale*scales[1]);
+ int m2 = nearest_int(inv_min*mins[1]);
+ y[i].scales[0] = d1 | (m1 << 4);
+ y[i].scales[1] = d2 | (m2 << 4);
+ y[i].d[0] = GGML_FP32_TO_FP16(max_scale/s_factor);
+ y[i].d[1] = GGML_FP32_TO_FP16(max_min/s_factor);
+
+ float sumlx = 0;
+ int suml2 = 0;
+ for (int j = 0; j < QK_K/32; ++j) {
+ const uint8_t sd = y[i].scales[j] & 0xF;
+ const uint8_t sm = y[i].scales[j] >> 4;
+ const float d = GGML_FP16_TO_FP32(y[i].d[0]) * sd;
+ if (!d) continue;
+ const float m = GGML_FP16_TO_FP32(y[i].d[1]) * sm;
+ for (int ii = 0; ii < 32; ++ii) {
+ int l = nearest_int((x[32*j + ii] + m)/d);
+ l = MAX(0, MIN(15, l));
+ L[32*j + ii] = l;
+ sumlx += (x[32*j + ii] + m)*l*sd;
+ suml2 += l*l*sd*sd;
+ }
+ }
+ if (suml2) {
+ y[i].d[0] = GGML_FP32_TO_FP16(sumlx/suml2);
+ }
+#endif
+ uint8_t * q = y[i].qs;
+ for (int j = 0; j < QK_K; j += 64) {
+ for (int l = 0; l < 32; ++l) q[l] = L[j + l] | (L[j + l + 32] << 4);
+ q += 32;
+ }
+
+ x += QK_K;
+
+ }
+}
+
+void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ const uint8_t * q = x[i].qs;
+
+#if QK_K == 256
+
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const float min = GGML_FP16_TO_FP32(x[i].dmin);
+
+ int is = 0;
+ uint8_t sc, m;
+ for (int j = 0; j < QK_K; j += 64) {
+ get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
+ const float d1 = d * sc; const float m1 = min * m;
+ get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
+ const float d2 = d * sc; const float m2 = min * m;
+ for (int l = 0; l < 32; ++l) *y++ = d1 * (q[l] & 0xF) - m1;
+ for (int l = 0; l < 32; ++l) *y++ = d2 * (q[l] >> 4) - m2;
+ q += 32; is += 2;
+ }
+#else
+ const float dall = GGML_FP16_TO_FP32(x[i].d[0]);
+ const float mall = GGML_FP16_TO_FP32(x[i].d[1]);
+ const float d1 = dall * (x[i].scales[0] & 0xF), m1 = mall * (x[i].scales[0] >> 4);
+ const float d2 = dall * (x[i].scales[1] & 0xF), m2 = mall * (x[i].scales[1] >> 4);
+ for (int l = 0; l < 32; ++l) {
+ y[l+ 0] = d1 * (q[l] & 0xF) - m1;
+ y[l+32] = d2 * (q[l] >> 4) - m2;
+ }
+ y += QK_K;
+#endif
+
+ }
+}
+
+void quantize_row_q4_K(const float * restrict x, void * restrict vy, int k) {
+ assert(k % QK_K == 0);
+ block_q4_K * restrict y = vy;
+ quantize_row_q4_K_reference(x, y, k);
+}
+
+size_t ggml_quantize_q4_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
+ assert(k % QK_K == 0);
+ (void)hist; // TODO: collect histograms
+
+ for (int j = 0; j < n; j += k) {
+ block_q4_K * restrict y = (block_q4_K *)dst + j/QK_K;
+ quantize_row_q4_K_reference(src + j, y, k);
+ }
+ return (n/QK_K*sizeof(block_q4_K));
+}
+
+// ====================== 5-bit (de)-quantization
+
+void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+#if QK_K == 256
+ uint8_t L[QK_K];
+ float mins[QK_K/32];
+ float scales[QK_K/32];
+ float weights[32];
+ uint8_t Laux[32];
+#else
+ int8_t L[QK_K];
+ float scales[QK_K/16];
+#endif
+
+ for (int i = 0; i < nb; i++) {
+
+#if QK_K == 256
+
+ float max_scale = 0; // since we subtract the min, scales are always positive
+ float max_min = 0;
+ for (int j = 0; j < QK_K/32; ++j) {
+ //scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
+ float sum_x2 = 0;
+ for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
+ float av_x = sqrtf(sum_x2/32);
+ for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
+ scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false);
+ float scale = scales[j];
+ if (scale > max_scale) {
+ max_scale = scale;
+ }
+ float min = mins[j];
+ if (min > max_min) {
+ max_min = min;
+ }
+ }
+
+ float inv_scale = max_scale > 0 ? 63.f/max_scale : 0.f;
+ float inv_min = max_min > 0 ? 63.f/max_min : 0.f;
+ for (int j = 0; j < QK_K/32; ++j) {
+ uint8_t ls = nearest_int(inv_scale*scales[j]);
+ uint8_t lm = nearest_int(inv_min*mins[j]);
+ ls = MIN(63, ls);
+ lm = MIN(63, lm);
+ if (j < 4) {
+ y[i].scales[j] = ls;
+ y[i].scales[j+4] = lm;
+ } else {
+ y[i].scales[j+4] = (ls & 0xF) | ((lm & 0xF) << 4);
+ y[i].scales[j-4] |= ((ls >> 4) << 6);
+ y[i].scales[j-0] |= ((lm >> 4) << 6);
+ }
+ }
+ y[i].d = GGML_FP32_TO_FP16(max_scale/63.f);
+ y[i].dmin = GGML_FP32_TO_FP16(max_min/63.f);
+
+ uint8_t sc, m;
+ for (int j = 0; j < QK_K/32; ++j) {
+ get_scale_min_k4(j, y[i].scales, &sc, &m);
+ const float d = GGML_FP16_TO_FP32(y[i].d) * sc;
+ if (!d) continue;
+ const float dm = GGML_FP16_TO_FP32(y[i].dmin) * m;
+ for (int ii = 0; ii < 32; ++ii) {
+ int l = nearest_int((x[32*j + ii] + dm)/d);
+ l = MAX(0, MIN(31, l));
+ L[32*j + ii] = l;
+ }
+ }
+
+ uint8_t * restrict qh = y[i].qh;
+ uint8_t * restrict ql = y[i].qs;
+ memset(qh, 0, QK_K/8);
+
+ uint8_t m1 = 1, m2 = 2;
+ for (int n = 0; n < QK_K; n += 64) {
+ for (int j = 0; j < 32; ++j) {
+ int l1 = L[n + j];
+ if (l1 > 15) {
+ l1 -= 16; qh[j] |= m1;
+ }
+ int l2 = L[n + j + 32];
+ if (l2 > 15) {
+ l2 -= 16; qh[j] |= m2;
+ }
+ ql[j] = l1 | (l2 << 4);
+ }
+ m1 <<= 2; m2 <<= 2;
+ ql += 32;
+ }
+#else
+ float max_scale = 0, amax = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ scales[j] = make_qx_quants(16, 16, x + 16*j, L + 16*j, 1);
+ float abs_scale = fabsf(scales[j]);
+ if (abs_scale > amax) {
+ amax = abs_scale;
+ max_scale = scales[j];
+ }
+ }
+
+ float iscale = -128.f/max_scale;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int l = nearest_int(iscale*scales[j]);
+ y[i].scales[j] = MAX(-128, MIN(127, l));
+ }
+ y[i].d = GGML_FP32_TO_FP16(1/iscale);
+
+ for (int j = 0; j < QK_K/16; ++j) {
+ const float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
+ if (!d) continue;
+ for (int ii = 0; ii < 16; ++ii) {
+ int l = nearest_int(x[16*j + ii]/d);
+ l = MAX(-16, MIN(15, l));
+ L[16*j + ii] = l + 16;
+ }
+ }
+
+ uint8_t * restrict qh = y[i].qh;
+ uint8_t * restrict ql = y[i].qs;
+ memset(qh, 0, QK_K/8);
+
+ for (int j = 0; j < 32; ++j) {
+ int jm = j%8;
+ int is = j/8;
+ int l1 = L[j];
+ if (l1 > 15) {
+ l1 -= 16; qh[jm] |= (1 << is);
+ }
+ int l2 = L[j + 32];
+ if (l2 > 15) {
+ l2 -= 16; qh[jm] |= (1 << (4 + is));
+ }
+ ql[j] = l1 | (l2 << 4);
+ }
+#endif
+
+ x += QK_K;
+
+ }
+}
+
+void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ const uint8_t * ql = x[i].qs;
+ const uint8_t * qh = x[i].qh;
+
+#if QK_K == 256
+
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const float min = GGML_FP16_TO_FP32(x[i].dmin);
+
+ int is = 0;
+ uint8_t sc, m;
+ uint8_t u1 = 1, u2 = 2;
+ for (int j = 0; j < QK_K; j += 64) {
+ get_scale_min_k4(is + 0, x[i].scales, &sc, &m);
+ const float d1 = d * sc; const float m1 = min * m;
+ get_scale_min_k4(is + 1, x[i].scales, &sc, &m);
+ const float d2 = d * sc; const float m2 = min * m;
+ for (int l = 0; l < 32; ++l) *y++ = d1 * ((ql[l] & 0xF) + (qh[l] & u1 ? 16 : 0)) - m1;
+ for (int l = 0; l < 32; ++l) *y++ = d2 * ((ql[l] >> 4) + (qh[l] & u2 ? 16 : 0)) - m2;
+ ql += 32; is += 2;
+ u1 <<= 2; u2 <<= 2;
+ }
+#else
+ float d = GGML_FP16_TO_FP32(x[i].d);
+ const int8_t * restrict s = x[i].scales;
+ for (int l = 0; l < 8; ++l) {
+ y[l+ 0] = d * s[0] * ((ql[l+ 0] & 0xF) - (qh[l] & 0x01 ? 0 : 16));
+ y[l+ 8] = d * s[0] * ((ql[l+ 8] & 0xF) - (qh[l] & 0x02 ? 0 : 16));
+ y[l+16] = d * s[1] * ((ql[l+16] & 0xF) - (qh[l] & 0x04 ? 0 : 16));
+ y[l+24] = d * s[1] * ((ql[l+24] & 0xF) - (qh[l] & 0x08 ? 0 : 16));
+ y[l+32] = d * s[2] * ((ql[l+ 0] >> 4) - (qh[l] & 0x10 ? 0 : 16));
+ y[l+40] = d * s[2] * ((ql[l+ 8] >> 4) - (qh[l] & 0x20 ? 0 : 16));
+ y[l+48] = d * s[3] * ((ql[l+16] >> 4) - (qh[l] & 0x40 ? 0 : 16));
+ y[l+56] = d * s[3] * ((ql[l+24] >> 4) - (qh[l] & 0x80 ? 0 : 16));
+ }
+ y += QK_K;
+#endif
+ }
+}
+
+void quantize_row_q5_K(const float * restrict x, void * restrict vy, int k) {
+ assert(k % QK_K == 0);
+ block_q5_K * restrict y = vy;
+ quantize_row_q5_K_reference(x, y, k);
+}
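+
+// Example (illustrative sketch, not used elsewhere in this file): round-tripping
+// a single super-block through q5_K.
+//
+//     float src[QK_K], dst[QK_K];
+//     block_q5_K blk;
+//     quantize_row_q5_K(src, &blk, QK_K);   // src must hold QK_K finite floats
+//     dequantize_row_q5_K(&blk, dst, QK_K); // dst now holds the lossy reconstruction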
+
+size_t ggml_quantize_q5_K(const float * restrict src, void * restrict dst, int n, int k, int64_t * restrict hist) {
+ assert(k % QK_K == 0);
+ (void)hist; // TODO: collect histograms
+
+ for (int j = 0; j < n; j += k) {
+ block_q5_K * restrict y = (block_q5_K *)dst + j/QK_K;
+ quantize_row_q5_K_reference(src + j, y, k);
+ }
+ return (n/QK_K*sizeof(block_q5_K));
+}
+
+// ====================== 6-bit (de)-quantization
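+//
+// block_q6_K stores QK_K values in QK_K/16 sub-blocks of 16, each with its own
+// signed 8-bit scale: x ~= d * scale * q with q in [-32, 31]; the low 4 bits of
+// each q live in ql and the top 2 bits in qh.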
+
+void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ int8_t L[QK_K];
+ float scales[QK_K/16];
+
+ for (int i = 0; i < nb; i++) {
+
+ float max_scale = 0;
+ float max_abs_scale = 0;
+
+ for (int ib = 0; ib < QK_K/16; ++ib) {
+
+ const float scale = make_qx_quants(16, 32, x + 16*ib, L + 16*ib, 1);
+ scales[ib] = scale;
+
+ const float abs_scale = fabsf(scale);
+ if (abs_scale > max_abs_scale) {
+ max_abs_scale = abs_scale;
+ max_scale = scale;
+ }
+
+ }
+
+ if (!max_abs_scale) {
+ memset(&y[i], 0, sizeof(block_q6_K));
+ y[i].d = GGML_FP32_TO_FP16(0.f);
+ x += QK_K;
+ continue;
+ }
+
+ float iscale = -128.f/max_scale;
+ y[i].d = GGML_FP32_TO_FP16(1/iscale);
+ for (int ib = 0; ib < QK_K/16; ++ib) {
+ y[i].scales[ib] = MIN(127, nearest_int(iscale*scales[ib]));
+ }
+
+ for (int j = 0; j < QK_K/16; ++j) {
+ float d = GGML_FP16_TO_FP32(y[i].d) * y[i].scales[j];
+ if (!d) {
+ continue;
+ }
+ for (int ii = 0; ii < 16; ++ii) {
+ int l = nearest_int(x[16*j + ii]/d);
+ l = MAX(-32, MIN(31, l));
+ L[16*j + ii] = l + 32;
+ }
+ }
+
+ uint8_t * restrict ql = y[i].ql;
+ uint8_t * restrict qh = y[i].qh;
+#if QK_K == 256
+ for (int j = 0; j < QK_K; j += 128) {
+ for (int l = 0; l < 32; ++l) {
+ const uint8_t q1 = L[j + l + 0] & 0xF;
+ const uint8_t q2 = L[j + l + 32] & 0xF;
+ const uint8_t q3 = L[j + l + 64] & 0xF;
+ const uint8_t q4 = L[j + l + 96] & 0xF;
+ ql[l+ 0] = q1 | (q3 << 4);
+ ql[l+32] = q2 | (q4 << 4);
+ qh[l] = (L[j + l] >> 4) | ((L[j + l + 32] >> 4) << 2) | ((L[j + l + 64] >> 4) << 4) | ((L[j + l + 96] >> 4) << 6);
+ }
+ ql += 64;
+ qh += 32;
+ }
+#else
+ for (int l = 0; l < 32; ++l) {
+ const uint8_t q1 = L[l + 0] & 0xF;
+ const uint8_t q2 = L[l + 32] & 0xF;
+ ql[l] = q1 | (q2 << 4);
+ }
+ for (int l = 0; l < 16; ++l) {
+ qh[l] = (L[l] >> 4) | ((L[l + 16] >> 4) << 2) | ((L[l + 32] >> 4) << 4) | ((L[l + 48] >> 4) << 6);
+ }
+#endif
+
+ x += QK_K;
+
+ }
+}
+
+void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict ql = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict sc = x[i].scales;
+
+#if QK_K == 256
+ for (int n = 0; n < QK_K; n += 128) {
+ for (int l = 0; l < 32; ++l) {
+ int is = l/16;
+ const int8_t q1 = (int8_t)((ql[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
+ const int8_t q2 = (int8_t)((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
+ const int8_t q3 = (int8_t)((ql[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
+ const int8_t q4 = (int8_t)((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
+ y[l + 0] = d * sc[is + 0] * q1;
+ y[l + 32] = d * sc[is + 2] * q2;
+ y[l + 64] = d * sc[is + 4] * q3;
+ y[l + 96] = d * sc[is + 6] * q4;
+ }
+ y += 128;
+ ql += 64;
+ qh += 32;
+ sc += 8;
+ }
+#else
+ for (int l = 0; l < 16; ++l) {
+ const int8_t q1 = (int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
+ const int8_t q2 = (int8_t)((ql[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
+ const int8_t q3 = (int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
+ const int8_t q4 = (int8_t)((ql[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
+ y[l+ 0] = d * sc[0] * q1;
+ y[l+16] = d * sc[1] * q2;
+ y[l+32] = d * sc[2] * q3;
+ y[l+48] = d * sc[3] * q4;
+ }
+ y += 64;
+#endif
+
+ }
+}
+
+void quantize_row_q6_K(const float * restrict x, void * restrict vy, int k) {
+ assert(k % QK_K == 0);
+ block_q6_K * restrict y = vy;
+ quantize_row_q6_K_reference(x, y, k);
+}
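+
+// Example (illustrative sketch): a q6_K round-trip over k floats, with k a
+// multiple of QK_K; the buffers are hypothetical, shown only to make the
+// calling convention concrete.
+//
+//     block_q6_K blocks[k/QK_K];
+//     quantize_row_q6_K(src, blocks, k);
+//     dequantize_row_q6_K(blocks, dst, k);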
+
+size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist) {
+ assert(k % QK_K == 0);
+ (void)hist; // TODO: collect histograms
+
+ for (int j = 0; j < n; j += k) {
+ block_q6_K * restrict y = (block_q6_K *)dst + j/QK_K;
+ quantize_row_q6_K_reference(src + j, y, k);
+ }
+ return (n/QK_K*sizeof(block_q6_K));
+}
+
+//===================================== Q8_K ==============================================
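+//
+// Q8_K is the activation-side format used by the K-quant dot products below:
+// one float scale d per super-block of QK_K int8 quants, plus bsums[], the sums
+// of each 16-value group, which lets the kernels fold per-sub-block mins without
+// re-reading the quants.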
+
+void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ float max = 0;
+ float amax = 0;
+ for (int j = 0; j < QK_K; ++j) {
+ float ax = fabsf(x[j]);
+ if (ax > amax) {
+ amax = ax; max = x[j];
+ }
+ }
+ if (!amax) {
+ y[i].d = 0;
+ memset(y[i].qs, 0, QK_K);
+ x += QK_K;
+ continue;
+ }
+ const float iscale = -128.f/max;
+ for (int j = 0; j < QK_K; ++j) {
+ int v = nearest_int(iscale*x[j]);
+ y[i].qs[j] = MIN(127, v);
+ }
+ for (int j = 0; j < QK_K/16; ++j) {
+ int sum = 0;
+ for (int ii = 0; ii < 16; ++ii) {
+ sum += y[i].qs[j*16 + ii];
+ }
+ y[i].bsums[j] = sum;
+ }
+ y[i].d = 1/iscale;
+ x += QK_K;
+ }
+}
+
+void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+ for (int j = 0; j < QK_K; ++j) {
+ *y++ = x[i].d * x[i].qs[j];
+ }
+ }
+}
+
+void quantize_row_q8_K(const float * restrict x, void * restrict y, int k) {
+ quantize_row_q8_K_reference(x, y, k);
+}
+
+//===================================== Dot products =================================
+
+//
+// Helper functions
+//
+#if __AVX__ || __AVX2__ || __AVX512F__
+
+// shuffles to pick the required scales in dot products
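+// Each helper returns a byte-shuffle mask that replicates the per-sub-block
+// scales so that every lane holds the scale of the values it multiplies, making
+// a single _mm256_shuffle_epi8/_mm_shuffle_epi8 enough to line the scales up
+// with the corresponding madd lanes.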
+static inline __m256i get_scale_shuffle_q3k(int i) {
+ static const uint8_t k_shuffle[128] = {
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
+ 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
+ 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
+ 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13, 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,
+ };
+ return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
+}
+static inline __m256i get_scale_shuffle_k4(int i) {
+ static const uint8_t k_shuffle[256] = {
+ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1,
+ 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3,
+ 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5,
+ 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7,
+ 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9,
+ 10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,10,11,
+ 12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,12,13,
+ 14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15,14,15
+ };
+ return _mm256_loadu_si256((const __m256i*)k_shuffle + i);
+}
+static inline __m128i get_scale_shuffle(int i) {
+ static const uint8_t k_shuffle[128] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+ 10,10,10,10,10,10,10,10, 11,11,11,11,11,11,11,11,
+ 12,12,12,12,12,12,12,12, 13,13,13,13,13,13,13,13,
+ 14,14,14,14,14,14,14,14, 15,15,15,15,15,15,15,15
+ };
+ return _mm_loadu_si128((const __m128i*)k_shuffle + i);
+}
+#endif
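+
+// All kernels below evaluate, over blocks of qk values,
+//
+//     s = sum_i d_x(i) * d_y(i) * sum_j q_x(i,j) * q_y(i,j)
+//
+// with the *_1 variants adding a per-block m(i) * s(i) correction for the offset.
+// Each SIMD branch is a translation of the scalar loop found at the end of the
+// corresponding function.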
+
+void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+
+ const block_q4_0 * restrict x = vx;
+ const block_q8_0 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ for (int i = 0; i < nb; i += 2) {
+ const block_q4_0 * restrict x0 = &x[i + 0];
+ const block_q4_0 * restrict x1 = &x[i + 1];
+ const block_q8_0 * restrict y0 = &y[i + 0];
+ const block_q8_0 * restrict y1 = &y[i + 1];
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+ const int8x16_t s8b = vdupq_n_s8(0x8);
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // sub 8
+ const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
+ const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
+ const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
+ const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ // dot product into int32x4_t
+ const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
+ const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+#else
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h));
+
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h));
+
+ const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
+ const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
+ const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
+ const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+#endif
+ }
+
+ *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+#elif defined(__AVX2__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ // Main loop
+ for (int i = 0; i < nb; ++i) {
+ /* Compute combined scale for the block */
+ const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
+
+ __m256i bx = bytes_from_nibbles_32(x[i].qs);
+
+        // Now we have a vector with bytes in the [ 0 .. 15 ] interval. Offset them into the [ -8 .. +7 ] interval.
+ const __m256i off = _mm256_set1_epi8( 8 );
+ bx = _mm256_sub_epi8( bx, off );
+
+ __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_i8_pairs_float(bx, by);
+
+ /* Multiply q with scale and accumulate */
+ acc = _mm256_fmadd_ps( d, q, acc );
+ }
+
+ *s = hsum_float_8(acc);
+#elif defined(__AVX__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ // Main loop
+ for (int i = 0; i < nb; ++i) {
+ // Compute combined scale for the block
+ const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
+
+ const __m128i lowMask = _mm_set1_epi8(0xF);
+ const __m128i off = _mm_set1_epi8(8);
+
+ const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
+
+ __m128i bx = _mm_and_si128(lowMask, tmp);
+ __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
+ bx = _mm_sub_epi8(bx, off);
+ const __m128i i32_0 = mul_sum_i8_pairs(bx, by);
+
+ bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
+ by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
+ bx = _mm_sub_epi8(bx, off);
+ const __m128i i32_1 = mul_sum_i8_pairs(bx, by);
+
+ // Convert int32_t to float
+ __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));
+
+ // Apply the scale, and accumulate
+ acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
+ }
+
+ *s = hsum_float_8(acc);
+#elif defined(__SSSE3__)
+ // set constants
+ const __m128i lowMask = _mm_set1_epi8(0xF);
+ const __m128i off = _mm_set1_epi8(8);
+
+ // Initialize accumulator with zeros
+ __m128 acc_0 = _mm_setzero_ps();
+ __m128 acc_1 = _mm_setzero_ps();
+ __m128 acc_2 = _mm_setzero_ps();
+ __m128 acc_3 = _mm_setzero_ps();
+
+ // First round without accumulation
+ {
+ _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for the block 0 and 1
+ const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );
+
+ const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
+
+ __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
+ __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
+ bx_0 = _mm_sub_epi8(bx_0, off);
+ const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
+
+ __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
+ __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
+ bx_1 = _mm_sub_epi8(bx_1, off);
+ const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
+
+ _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for the block 2 and 3
+ const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );
+
+ const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
+
+ __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
+ __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
+ bx_2 = _mm_sub_epi8(bx_2, off);
+ const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
+
+ __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
+ __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
+ bx_3 = _mm_sub_epi8(bx_3, off);
+ const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
+
+ // Convert int32_t to float
+ __m128 p0 = _mm_cvtepi32_ps(i32_0);
+ __m128 p1 = _mm_cvtepi32_ps(i32_1);
+ __m128 p2 = _mm_cvtepi32_ps(i32_2);
+ __m128 p3 = _mm_cvtepi32_ps(i32_3);
+
+ // Apply the scale
+ acc_0 = _mm_mul_ps( d_0_1, p0 );
+ acc_1 = _mm_mul_ps( d_0_1, p1 );
+ acc_2 = _mm_mul_ps( d_2_3, p2 );
+ acc_3 = _mm_mul_ps( d_2_3, p3 );
+ }
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ // Main loop
+ for (int i = 2; i < nb; i+=2) {
+ _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for the block 0 and 1
+ const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
+
+ const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
+
+ __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
+ __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
+ bx_0 = _mm_sub_epi8(bx_0, off);
+ const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
+
+ __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
+ __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
+ bx_1 = _mm_sub_epi8(bx_1, off);
+ const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
+
+ _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
+ _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
+
+ // Compute combined scale for the block 2 and 3
+ const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
+
+ const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
+
+ __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
+ __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
+ bx_2 = _mm_sub_epi8(bx_2, off);
+ const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
+
+ __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
+ __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
+ bx_3 = _mm_sub_epi8(bx_3, off);
+ const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
+
+ // Convert int32_t to float
+ __m128 p0 = _mm_cvtepi32_ps(i32_0);
+ __m128 p1 = _mm_cvtepi32_ps(i32_1);
+ __m128 p2 = _mm_cvtepi32_ps(i32_2);
+ __m128 p3 = _mm_cvtepi32_ps(i32_3);
+
+ // Apply the scale
+ __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
+ __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
+ __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
+ __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
+
+        // Accumulate
+ acc_0 = _mm_add_ps(p0_d, acc_0);
+ acc_1 = _mm_add_ps(p1_d, acc_1);
+ acc_2 = _mm_add_ps(p2_d, acc_2);
+ acc_3 = _mm_add_ps(p3_d, acc_3);
+ }
+
+ *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
+#elif defined(__riscv_v_intrinsic)
+ float sumf = 0.0;
+
+ size_t vl = __riscv_vsetvl_e8m1(qk/2);
+
+ for (int i = 0; i < nb; i++) {
+ // load elements
+ vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
+
+ vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+ vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
+
+ // mask and store lower part of x, and then upper part
+ vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+ vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
+
+ vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+ vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
+
+ // subtract offset
+ vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 8, vl);
+ vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 8, vl);
+
+ vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+ vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
+
+ vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
+
+ vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+ vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
+
+ int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
+
+ sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
+ }
+
+ *s = sumf;
+#else
+ // scalar
+ float sumf = 0.0;
+
+ for (int i = 0; i < nb; i++) {
+ int sumi = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const int v0 = (x[i].qs[j] & 0x0F) - 8;
+ const int v1 = (x[i].qs[j] >> 4) - 8;
+
+ sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
+ }
+
+ sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
+ }
+
+ *s = sumf;
+#endif
+}
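+
+// Example (illustrative sketch): quantize two float rows with the helpers defined
+// earlier in this file and take their dot product (n must be a multiple of QK8_0;
+// some SIMD branches above additionally assume an even number of blocks):
+//
+//     block_q4_0 qx[n/QK4_0];
+//     block_q8_0 qy[n/QK8_0];
+//     quantize_row_q4_0(a, qx, n);
+//     quantize_row_q8_0(b, qy, n);
+//     float dot;
+//     ggml_vec_dot_q4_0_q8_0(n, &dot, qx, qy);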
+
+void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = QK8_1;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+
+ const block_q4_1 * restrict x = vx;
+ const block_q8_1 * restrict y = vy;
+
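+    // q4_1 stores x = d*q + m per block, so the dot product splits into
+    // d*d_y * sum(q*q8) plus m*s, where s = d_y * sum(q8) is precomputed by the
+    // q8_1 quantizer and carried in block_q8_1 (the `summs` accumulators below).
+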
+ // TODO: add WASM SIMD
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ float summs = 0;
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ for (int i = 0; i < nb; i += 2) {
+ const block_q4_1 * restrict x0 = &x[i + 0];
+ const block_q4_1 * restrict x1 = &x[i + 1];
+ const block_q8_1 * restrict y0 = &y[i + 0];
+ const block_q8_1 * restrict y1 = &y[i + 1];
+
+ summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ // dot product into int32x4_t
+ const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
+ const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
+#else
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h));
+
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1h));
+
+ const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
+ const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
+ const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
+ const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
+#endif
+ }
+
+ *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
+#elif defined(__AVX2__) || defined(__AVX__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0;
+
+ // Main loop
+ for (int i = 0; i < nb; ++i) {
+ const float d0 = GGML_FP16_TO_FP32(x[i].d);
+ const float d1 = y[i].d;
+
+ summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
+
+ const __m256 d0v = _mm256_set1_ps( d0 );
+ const __m256 d1v = _mm256_set1_ps( d1 );
+
+ // Compute combined scales
+ const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );
+
+        // Load 16 bytes, and unpack the 4-bit fields into bytes, making 32 bytes
+ const __m256i bx = bytes_from_nibbles_32(x[i].qs);
+ const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs );
+
+ const __m256 xy = mul_sum_us8_pairs_float(bx, by);
+
+ // Accumulate d0*d1*x*y
+#if defined(__AVX2__)
+ acc = _mm256_fmadd_ps( d0d1, xy, acc );
+#else
+ acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
+#endif
+ }
+
+ *s = hsum_float_8(acc) + summs;
+#elif defined(__riscv_v_intrinsic)
+ float sumf = 0.0;
+
+ size_t vl = __riscv_vsetvl_e8m1(qk/2);
+
+ for (int i = 0; i < nb; i++) {
+ // load elements
+ vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
+
+ vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+ vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
+
+ // mask and store lower part of x, and then upper part
+ vuint8mf2_t x_a = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+ vuint8mf2_t x_l = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
+
+ vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+ vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
+
+ vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+ vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
+
+ vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
+
+ vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+ vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
+
+ int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
+ }
+
+ *s = sumf;
+#else
+ // scalar
+ float sumf = 0.0;
+
+ for (int i = 0; i < nb; i++) {
+ int sumi = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const int v0 = (x[i].qs[j] & 0x0F);
+ const int v1 = (x[i].qs[j] >> 4);
+
+ sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
+ }
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
+ }
+
+ *s = sumf;
+#endif
+}
+
+void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+ assert(qk == QK5_0);
+
+ const block_q5_0 * restrict x = vx;
+ const block_q8_0 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ uint32_t qh0;
+ uint32_t qh1;
+
+ uint64_t tmp0[4];
+ uint64_t tmp1[4];
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ for (int i = 0; i < nb; i += 2) {
+ const block_q5_0 * restrict x0 = &x[i];
+ const block_q5_0 * restrict x1 = &x[i + 1];
+ const block_q8_0 * restrict y0 = &y[i];
+ const block_q8_0 * restrict y1 = &y[i + 1];
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+ // extract the 5th bit via lookup table ((!b) << 4)
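+        // table_b2b_1[b] expands the 8 bits of b into 8 bytes equal to 0x10 where
+        // the bit is clear and 0x00 where it is set; subtracting those bytes from
+        // the nibbles below is the same as OR-ing in the 5th bit and subtracting 16.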
+ memcpy(&qh0, x0->qh, sizeof(qh0));
+ memcpy(&qh1, x1->qh, sizeof(qh1));
+
+ tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
+ tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
+ tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
+ tmp0[3] = table_b2b_1[(qh0 >> 24) ];
+
+ tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
+ tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
+ tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
+ tmp1[3] = table_b2b_1[(qh1 >> 24) ];
+
+ const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
+ const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
+ const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
+ const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
+ const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
+ const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
+ const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
+ const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
+ vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
+ vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+#else
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
+
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
+
+ const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
+ const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
+ const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
+ const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+#endif
+ }
+
+ *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+#elif defined(__wasm_simd128__)
+ v128_t sumv = wasm_f32x4_splat(0.0f);
+
+ uint32_t qh;
+ uint64_t tmp[4];
+
+ // TODO: check if unrolling this is better
+ for (int i = 0; i < nb; ++i) {
+ const block_q5_0 * restrict x0 = &x[i];
+ const block_q8_0 * restrict y0 = &y[i];
+
+ const v128_t m4b = wasm_i8x16_splat(0x0F);
+
+ // extract the 5th bit
+ memcpy(&qh, x0->qh, sizeof(qh));
+
+ tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
+ tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
+ tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
+ tmp[3] = table_b2b_1[(qh >> 24) ];
+
+ const v128_t qhl = wasm_v128_load(tmp + 0);
+ const v128_t qhh = wasm_v128_load(tmp + 2);
+
+ const v128_t v0 = wasm_v128_load(x0->qs);
+
+ // 4-bit -> 8-bit
+ const v128_t v0l = wasm_v128_and (v0, m4b);
+ const v128_t v0h = wasm_u8x16_shr(v0, 4);
+
+ // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
+ const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
+ const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);
+
+ // load y
+ const v128_t v1l = wasm_v128_load(y0->qs);
+ const v128_t v1h = wasm_v128_load(y0->qs + 16);
+
+ // int8x16 -> int16x8
+ const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
+ const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
+ const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
+ const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
+
+ const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
+ const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
+ const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
+ const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
+
+ // dot product
+ sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
+ wasm_i32x4_add(
+ wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
+ wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
+ wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
+ wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
+ wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
+ }
+
+ *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
+ wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
+#elif defined(__AVX2__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ // Main loop
+ for (int i = 0; i < nb; i++) {
+ /* Compute combined scale for the block */
+ const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
+
+ __m256i bx = bytes_from_nibbles_32(x[i].qs);
+ __m256i bxhi = bytes_from_bits_32(x[i].qh);
+ bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
+ bx = _mm256_or_si256(bx, bxhi);
+
+ __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_i8_pairs_float(bx, by);
+
+ /* Multiply q with scale and accumulate */
+ acc = _mm256_fmadd_ps(d, q, acc);
+ }
+
+ *s = hsum_float_8(acc);
+#elif defined(__AVX__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+ __m128i mask = _mm_set1_epi8((char)0xF0);
+
+ // Main loop
+ for (int i = 0; i < nb; i++) {
+ /* Compute combined scale for the block */
+ const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
+
+ __m256i bx = bytes_from_nibbles_32(x[i].qs);
+ const __m256i bxhi = bytes_from_bits_32(x[i].qh);
+ __m128i bxhil = _mm256_castsi256_si128(bxhi);
+ __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
+ bxhil = _mm_andnot_si128(bxhil, mask);
+ bxhih = _mm_andnot_si128(bxhih, mask);
+ __m128i bxl = _mm256_castsi256_si128(bx);
+ __m128i bxh = _mm256_extractf128_si256(bx, 1);
+ bxl = _mm_or_si128(bxl, bxhil);
+ bxh = _mm_or_si128(bxh, bxhih);
+ bx = MM256_SET_M128I(bxh, bxl);
+
+ const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_i8_pairs_float(bx, by);
+
+ /* Multiply q with scale and accumulate */
+ acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
+ }
+
+ *s = hsum_float_8(acc);
+#elif defined(__riscv_v_intrinsic)
+ float sumf = 0.0;
+
+ uint32_t qh;
+
+ size_t vl = __riscv_vsetvl_e8m1(qk/2);
+
+    // These temporary registers are for masking and shift operations
+ vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
+ vuint32m2_t vt_2 = __riscv_vsll_vv_u32m2(__riscv_vmv_v_x_u32m2(1, vl), vt_1, vl);
+
+ vuint32m2_t vt_3 = __riscv_vsll_vx_u32m2(vt_2, 16, vl);
+ vuint32m2_t vt_4 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
+
+ for (int i = 0; i < nb; i++) {
+ memcpy(&qh, x[i].qh, sizeof(uint32_t));
+
+ // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
+ vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(vt_2, qh, vl);
+ vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(xha_0, vt_1, vl);
+ vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
+
+ // ((qh & (1u << (j + 16))) >> (j + 12));
+ vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(vt_3, qh, vl);
+ vuint32m2_t xhl_1 = __riscv_vsrl_vv_u32m2(xha_1, vt_4, vl);
+
+ // narrowing
+ vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xhl_0, vl);
+ vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
+
+ vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xhl_1, vl);
+ vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
+
+ // load
+ vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
+
+ vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+ vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
+
+ vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+ vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
+
+ vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
+ vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
+
+ vint8mf2_t x_ai = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+ vint8mf2_t x_li = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
+
+ vint8mf2_t v0 = __riscv_vsub_vx_i8mf2(x_ai, 16, vl);
+ vint8mf2_t v1 = __riscv_vsub_vx_i8mf2(x_li, 16, vl);
+
+ vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+ vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
+
+ vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
+
+ vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+ vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
+
+ int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
+ }
+
+ *s = sumf;
+#else
+ // scalar
+ float sumf = 0.0;
+
+ for (int i = 0; i < nb; i++) {
+ uint32_t qh;
+ memcpy(&qh, x[i].qh, sizeof(qh));
+
+ int sumi = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
+ const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
+
+ const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
+ const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
+
+ sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
+ }
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
+ }
+
+ *s = sumf;
+#endif
+}
+
+void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = QK8_1;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+ assert(qk == QK5_1);
+
+ const block_q5_1 * restrict x = vx;
+ const block_q8_1 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ float summs0 = 0.0f;
+ float summs1 = 0.0f;
+
+ uint32_t qh0;
+ uint32_t qh1;
+
+ uint64_t tmp0[4];
+ uint64_t tmp1[4];
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ for (int i = 0; i < nb; i += 2) {
+ const block_q5_1 * restrict x0 = &x[i];
+ const block_q5_1 * restrict x1 = &x[i + 1];
+ const block_q8_1 * restrict y0 = &y[i];
+ const block_q8_1 * restrict y1 = &y[i + 1];
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+ summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
+ summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;
+
+ // extract the 5th bit via lookup table ((b) << 4)
+ memcpy(&qh0, x0->qh, sizeof(qh0));
+ memcpy(&qh1, x1->qh, sizeof(qh1));
+
+ tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
+ tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
+ tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
+ tmp0[3] = table_b2b_0[(qh0 >> 24) ];
+
+ tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
+ tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
+ tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
+ tmp1[3] = table_b2b_0[(qh1 >> 24) ];
+
+ const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
+ const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
+ const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
+ const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // add high bit
+ const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
+ const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
+ const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
+ const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
+ vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
+ vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
+#else
+ const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
+ const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
+ const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
+ const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
+
+ const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
+ const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
+ const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
+ const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
+
+ const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
+ const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
+ const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
+ const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
+#endif
+ }
+
+ *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
+#elif defined(__wasm_simd128__)
+ v128_t sumv = wasm_f32x4_splat(0.0f);
+
+ float summs = 0.0f;
+
+ uint32_t qh;
+ uint64_t tmp[4];
+
+ // TODO: check if unrolling this is better
+ for (int i = 0; i < nb; ++i) {
+ const block_q5_1 * restrict x0 = &x[i];
+ const block_q8_1 * restrict y0 = &y[i];
+
+ summs += GGML_FP16_TO_FP32(x0->m) * y0->s;
+
+ const v128_t m4b = wasm_i8x16_splat(0x0F);
+
+ // extract the 5th bit
+ memcpy(&qh, x0->qh, sizeof(qh));
+
+ tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
+ tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
+ tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
+ tmp[3] = table_b2b_0[(qh >> 24) ];
+
+ const v128_t qhl = wasm_v128_load(tmp + 0);
+ const v128_t qhh = wasm_v128_load(tmp + 2);
+
+ const v128_t v0 = wasm_v128_load(x0->qs);
+
+ // 4-bit -> 8-bit
+ const v128_t v0l = wasm_v128_and (v0, m4b);
+ const v128_t v0h = wasm_u8x16_shr(v0, 4);
+
+ // add high bit
+ const v128_t v0lf = wasm_v128_or(v0l, qhl);
+ const v128_t v0hf = wasm_v128_or(v0h, qhh);
+
+ // load y
+ const v128_t v1l = wasm_v128_load(y0->qs);
+ const v128_t v1h = wasm_v128_load(y0->qs + 16);
+
+ // int8x16 -> int16x8
+ const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
+ const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
+ const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
+ const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
+
+ const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
+ const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
+ const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
+ const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
+
+ // dot product
+ sumv = wasm_f32x4_add(sumv,
+ wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
+ wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
+ wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
+ wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
+ wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
+ wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
+ }
+
+ *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
+ wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
+#elif defined(__AVX2__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0.0f;
+
+ // Main loop
+ for (int i = 0; i < nb; i++) {
+ const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
+
+ summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
+
+ __m256i bx = bytes_from_nibbles_32(x[i].qs);
+ __m256i bxhi = bytes_from_bits_32(x[i].qh);
+ bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
+ bx = _mm256_or_si256(bx, bxhi);
+
+ const __m256 dy = _mm256_set1_ps(y[i].d);
+ const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_us8_pairs_float(bx, by);
+
+ acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
+ }
+
+ *s = hsum_float_8(acc) + summs;
+#elif defined(__AVX__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+ __m128i mask = _mm_set1_epi8(0x10);
+
+ float summs = 0.0f;
+
+ // Main loop
+ for (int i = 0; i < nb; i++) {
+ const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
+
+ summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
+
+ __m256i bx = bytes_from_nibbles_32(x[i].qs);
+ const __m256i bxhi = bytes_from_bits_32(x[i].qh);
+ __m128i bxhil = _mm256_castsi256_si128(bxhi);
+ __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
+ bxhil = _mm_and_si128(bxhil, mask);
+ bxhih = _mm_and_si128(bxhih, mask);
+ __m128i bxl = _mm256_castsi256_si128(bx);
+ __m128i bxh = _mm256_extractf128_si256(bx, 1);
+ bxl = _mm_or_si128(bxl, bxhil);
+ bxh = _mm_or_si128(bxh, bxhih);
+ bx = MM256_SET_M128I(bxh, bxl);
+
+ const __m256 dy = _mm256_set1_ps(y[i].d);
+ const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_us8_pairs_float(bx, by);
+
+ acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
+ }
+
+ *s = hsum_float_8(acc) + summs;
+#elif defined(__riscv_v_intrinsic)
+ float sumf = 0.0;
+
+ uint32_t qh;
+
+ size_t vl = __riscv_vsetvl_e8m1(qk/2);
+
+ // temporary registers for shift operations
+ vuint32m2_t vt_1 = __riscv_vid_v_u32m2(vl);
+ vuint32m2_t vt_2 = __riscv_vadd_vx_u32m2(vt_1, 12, vl);
+
+ for (int i = 0; i < nb; i++) {
+ memcpy(&qh, x[i].qh, sizeof(uint32_t));
+
+ // load qh
+ vuint32m2_t vqh = __riscv_vmv_v_x_u32m2(qh, vl);
+
+ // ((qh >> (j + 0)) << 4) & 0x10;
+ vuint32m2_t xhr_0 = __riscv_vsrl_vv_u32m2(vqh, vt_1, vl);
+ vuint32m2_t xhl_0 = __riscv_vsll_vx_u32m2(xhr_0, 4, vl);
+ vuint32m2_t xha_0 = __riscv_vand_vx_u32m2(xhl_0, 0x10, vl);
+
+ // ((qh >> (j + 12)) ) & 0x10;
+ vuint32m2_t xhr_1 = __riscv_vsrl_vv_u32m2(vqh, vt_2, vl);
+ vuint32m2_t xha_1 = __riscv_vand_vx_u32m2(xhr_1, 0x10, vl);
+
+ // narrowing
+ vuint16m1_t xhc_0 = __riscv_vncvt_x_x_w_u16m1(xha_0, vl);
+ vuint8mf2_t xh_0 = __riscv_vncvt_x_x_w_u8mf2(xhc_0, vl);
+
+ vuint16m1_t xhc_1 = __riscv_vncvt_x_x_w_u16m1(xha_1, vl);
+ vuint8mf2_t xh_1 = __riscv_vncvt_x_x_w_u8mf2(xhc_1, vl);
+
+ // load
+ vuint8mf2_t tx = __riscv_vle8_v_u8mf2(x[i].qs, vl);
+
+ vint8mf2_t y0 = __riscv_vle8_v_i8mf2(y[i].qs, vl);
+ vint8mf2_t y1 = __riscv_vle8_v_i8mf2(y[i].qs+16, vl);
+
+ vuint8mf2_t x_at = __riscv_vand_vx_u8mf2(tx, 0x0F, vl);
+ vuint8mf2_t x_lt = __riscv_vsrl_vx_u8mf2(tx, 0x04, vl);
+
+ vuint8mf2_t x_a = __riscv_vor_vv_u8mf2(x_at, xh_0, vl);
+ vuint8mf2_t x_l = __riscv_vor_vv_u8mf2(x_lt, xh_1, vl);
+
+ vint8mf2_t v0 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_a);
+ vint8mf2_t v1 = __riscv_vreinterpret_v_u8mf2_i8mf2(x_l);
+
+ vint16m1_t vec_mul1 = __riscv_vwmul_vv_i16m1(v0, y0, vl);
+ vint16m1_t vec_mul2 = __riscv_vwmul_vv_i16m1(v1, y1, vl);
+
+ vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
+
+ vint32m1_t vs1 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul1, vec_zero, vl);
+ vint32m1_t vs2 = __riscv_vwredsum_vs_i16m1_i32m1(vec_mul2, vs1, vl);
+
+ int sumi = __riscv_vmv_x_s_i32m1_i32(vs2);
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
+ }
+
+ *s = sumf;
+#else
+ // scalar
+ float sumf = 0.0;
+
+ for (int i = 0; i < nb; i++) {
+ uint32_t qh;
+ memcpy(&qh, x[i].qh, sizeof(qh));
+
+ int sumi = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
+
+ const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
+ const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;
+
+ sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
+ }
+
+ sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
+ }
+
+ *s = sumf;
+#endif
+}
+
+void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+
+ const block_q8_0 * restrict x = vx;
+ const block_q8_0 * restrict y = vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ assert(nb % 2 == 0); // TODO: handle odd nb
+
+ for (int i = 0; i < nb; i += 2) {
+ const block_q8_0 * restrict x0 = &x[i + 0];
+ const block_q8_0 * restrict x1 = &x[i + 1];
+ const block_q8_0 * restrict y0 = &y[i + 0];
+ const block_q8_0 * restrict y1 = &y[i + 1];
+
+ const int8x16_t x0_0 = vld1q_s8(x0->qs);
+ const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
+ const int8x16_t x1_0 = vld1q_s8(x1->qs);
+ const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
+
+ // load y
+ const int8x16_t y0_0 = vld1q_s8(y0->qs);
+ const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
+ const int8x16_t y1_0 = vld1q_s8(y1->qs);
+ const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
+ vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+ vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
+ vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+
+#else
+ const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0));
+ const int16x8_t p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0));
+ const int16x8_t p0_2 = vmull_s8(vget_low_s8 (x0_1), vget_low_s8 (y0_1));
+ const int16x8_t p0_3 = vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1));
+
+ const int16x8_t p1_0 = vmull_s8(vget_low_s8 (x1_0), vget_low_s8 (y1_0));
+ const int16x8_t p1_1 = vmull_s8(vget_high_s8(x1_0), vget_high_s8(y1_0));
+ const int16x8_t p1_2 = vmull_s8(vget_low_s8 (x1_1), vget_low_s8 (y1_1));
+ const int16x8_t p1_3 = vmull_s8(vget_high_s8(x1_1), vget_high_s8(y1_1));
+
+ const int32x4_t p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1));
+ const int32x4_t p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3));
+ const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1));
+ const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3));
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+#endif
+ }
+
+ *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+#elif defined(__AVX2__) || defined(__AVX__)
+ // Initialize accumulator with zeros
+ __m256 acc = _mm256_setzero_ps();
+
+ // Main loop
+ for (int i = 0; i < nb; ++i) {
+ // Compute combined scale for the block
+ const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
+ __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs);
+ __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+
+ const __m256 q = mul_sum_i8_pairs_float(bx, by);
+
+ // Multiply q with scale and accumulate
+#if defined(__AVX2__)
+ acc = _mm256_fmadd_ps( d, q, acc );
+#else
+ acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
+#endif
+ }
+
+ *s = hsum_float_8(acc);
+#elif defined(__riscv_v_intrinsic)
+ float sumf = 0.0;
+ size_t vl = __riscv_vsetvl_e8m1(qk);
+
+ for (int i = 0; i < nb; i++) {
+ // load elements
+ vint8m1_t bx = __riscv_vle8_v_i8m1(x[i].qs, vl);
+ vint8m1_t by = __riscv_vle8_v_i8m1(y[i].qs, vl);
+
+ vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx, by, vl);
+
+ vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
+ vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);
+
+ int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);
+
+ sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
+ }
+
+ *s = sumf;
+#else
+ // scalar
+ float sumf = 0.0;
+
+ for (int i = 0; i < nb; i++) {
+ int sumi = 0;
+
+ for (int j = 0; j < qk; j++) {
+ sumi += x[i].qs[j]*y[i].qs[j];
+ }
+
+ sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
+ }
+
+ *s = sumf;
+#endif
+}
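+
+// Example (illustrative sketch): the full q8_0 path for two float rows a and b
+// of length n (a multiple of QK8_0; the NEON branch above also expects an even
+// number of blocks):
+//
+//     block_q8_0 qa[n/QK8_0], qb[n/QK8_0];
+//     quantize_row_q8_0(a, qa, n);
+//     quantize_row_q8_0(b, qb, n);
+//     float dot;
+//     ggml_vec_dot_q8_0_q8_0(n, &dot, qa, qb);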
+
+#if QK_K == 256
+void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+
+ const block_q2_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
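+    // block_q2_K packs a 4-bit scale and a 4-bit min for each 16-value sub-block
+    // into x[i].scales, so the result decomposes as
+    //
+    //     s = sum_i [ d(i) * sum_j sc_j * (q2 . q8)_j - dmin(i) * sum_j min_j * bsum_j ]
+    //
+    // where bsum_j are the 16-value partial sums precomputed in block_q8_K.
+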
+#ifdef __ARM_NEON
+
+ const uint8x16_t m3 = vdupq_n_u8(0x3);
+ const uint8x16_t m4 = vdupq_n_u8(0xF);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+
+ ggml_int8x16x2_t q2bytes;
+ uint8_t aux[16];
+
+ float sum = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+ const uint8_t * restrict sc = x[i].scales;
+
+ const uint8x16_t mins_and_scales = vld1q_u8(sc);
+ const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
+ vst1q_u8(aux, scales);
+
+ const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
+ const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
+ const ggml_int16x8x2_t mins16 = {vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))};
+ const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
+ vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
+ const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
+ vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
+ sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
+
+ int isum = 0;
+ int is = 0;
+
+// We use this macro instead of a function call because, for some reason, the
+// code runs 2-3% slower with a function call, even when the function is
+// declared inline
+#if defined(__ARM_FEATURE_DOTPROD)
+#define MULTIPLY_ACCUM_WITH_SCALE(index)\
+ isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
+ isum += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];
+#else
+#define MULTIPLY_ACCUM_WITH_SCALE(index)\
+ {\
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[0]), vget_low_s8 (q8bytes.val[0])),\
+ vmull_s8(vget_high_s8(q2bytes.val[0]), vget_high_s8(q8bytes.val[0])));\
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[1]), vget_low_s8 (q8bytes.val[1])),\
+ vmull_s8(vget_high_s8(q2bytes.val[1]), vget_high_s8(q8bytes.val[1])));\
+ isum += vaddvq_s16(p1) * aux[is+(index)] + vaddvq_s16(p2) * aux[is+1+(index)];\
+ }
+#endif
+
+#define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
+ q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\
+ q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
+ q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
+ MULTIPLY_ACCUM_WITH_SCALE((index));
+
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32;
+
+ ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
+ q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
+ q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
+ MULTIPLY_ACCUM_WITH_SCALE(0);
+
+ SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
+
+ SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
+
+ SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);
+
+ is += 8;
+ }
+ sum += d * isum;
+
+ }
+
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m3 = _mm256_set1_epi8(3);
+ const __m128i m4 = _mm_set1_epi8(0xF);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
+ const __m128i scales8 = _mm_and_si128(mins_and_scales, m4);
+ const __m128i mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
+ const __m256i mins = _mm256_cvtepi8_epi16(mins8);
+ const __m256i prod = _mm256_madd_epi16(mins, _mm256_loadu_si256((const __m256i*)y[i].bsums));
+
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc);
+
+ const __m256i all_scales = _mm256_cvtepi8_epi16(scales8);
+ const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
+ const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
+ const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
+
+ __m256i sumi = _mm256_setzero_si256();
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const __m256i q2bits = _mm256_loadu_si256((const __m256i*)q2); q2 += 32;
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+
+ const __m256i q2_0 = _mm256_and_si256(q2bits, m3);
+ const __m256i q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3);
+ const __m256i q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3);
+ const __m256i q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3);
+
+ __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
+ __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
+ __m256i p2 = _mm256_maddubs_epi16(q2_2, q8_2);
+ __m256i p3 = _mm256_maddubs_epi16(q2_3, q8_3);
+
+ p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(0)), p0);
+ p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(1)), p1);
+ p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(2)), p2);
+ p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(3)), p3);
+
+ p0 = _mm256_add_epi32(p0, p1);
+ p2 = _mm256_add_epi32(p2, p3);
+
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2));
+ }
+
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m3 = _mm_set1_epi8(0x3);
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i m2 = _mm_set1_epi8(0x2);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ // load mins and scales from block_q2_K.scales[QK_K/16]
+ const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
+ const __m128i scales16 = _mm_and_si128(mins_and_scales, m4);
+ const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
+ const __m128i mins_0 = _mm_cvtepi8_epi16(mins16);
+ const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16));
+
+ // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2
+ const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0]));
+ const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));
+
+ // sumf += -dmin * summs in 32bits*8
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(MM256_SET_M128I(summs_1, summs_0))), acc);
+
+ const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
+ const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
+ const __m128i scales[2] = { scales_0, scales_1 };
+
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K]
+ const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+
+ // load 2bits*16*8 from block_q2_K.qs[QK_K/4]
+ __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
+ const __m128i q2_0 = _mm_and_si128(q2bits, m3);
+ const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
+ const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
+ const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
+ q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
+ const __m128i q2_1 = _mm_and_si128(q2bits, m3);
+ const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
+ const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
+ const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
+
+ // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8
+ __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0);
+ __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1);
+ __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2);
+ __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3);
+ __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4);
+ __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5);
+ __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6);
+ __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7);
+
+ // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8
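+            // (shuffle 0x0100 broadcasts 16-bit scale element 0 across the register; adding m2,
+            // i.e. 2 to every byte index, advances to the next 16-bit element, stepping through
+            // the 8 scales)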
+ __m128i shuffle = _mm_set1_epi16(0x0100);
+ p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7);
+
+ p0 = _mm_add_epi32(p0, p1);
+ p2 = _mm_add_epi32(p2, p3);
+ p4 = _mm_add_epi32(p4, p5);
+ p6 = _mm_add_epi32(p6, p7);
+
+ // isum in 32bits*4*2
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6));
+ }
+
+ // sumf += dall * isum - dmin * summs in 32bits
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ float sumf = 0;
+ uint8_t temp_01[32] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * q2 = x[i].qs;
+ const int8_t * q8 = y[i].qs;
+ const uint8_t * sc = x[i].scales;
+
+ const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ size_t vl = 16;
+
+ vuint8m1_t scales = __riscv_vle8_v_u8m1(sc, vl);
+ vuint8m1_t aux = __riscv_vand_vx_u8m1(scales, 0x0F, vl);
+
+ vint16m1_t q8sums = __riscv_vle16_v_i16m1(y[i].bsums, vl);
+
+ vuint8mf2_t scales_2 = __riscv_vle8_v_u8mf2(sc, vl);
+ vuint8mf2_t mins8 = __riscv_vsrl_vx_u8mf2(scales_2, 0x4, vl);
+ vint16m1_t mins = __riscv_vreinterpret_v_u16m1_i16m1(__riscv_vzext_vf2_u16m1(mins8, vl));
+ vint32m2_t prod = __riscv_vwmul_vv_i32m2(q8sums, mins, vl);
+ vint32m1_t vsums = __riscv_vredsum_vs_i32m2_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
+
+ sumf += dmin * __riscv_vmv_x_s_i32m1_i32(vsums);
+
+ vl = 32;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+ vuint8m1_t v_b = __riscv_vle8_v_u8m1(temp_01, vl);
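+        // temp_01 supplies gather indices (16 zeros followed by 16 ones) so that vrgather below
+        // can broadcast one scale byte to each 16-quant half of the 32-element vector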
+
+        uint8_t is = 0;
+        int isum = 0;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+ // load Q2
+ vuint8m1_t q2_x = __riscv_vle8_v_u8m1(q2, vl);
+
+ vuint8m1_t q2_0 = __riscv_vand_vx_u8m1(q2_x, 0x03, vl);
+ vuint8m1_t q2_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x2, vl), 0x03 , vl);
+ vuint8m1_t q2_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x4, vl), 0x03 , vl);
+ vuint8m1_t q2_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q2_x, 0x6, vl), 0x03 , vl);
+
+ // duplicate scale elements for product
+ vuint8m1_t sc0 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 0+is, vl), vl);
+ vuint8m1_t sc1 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 2+is, vl), vl);
+ vuint8m1_t sc2 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 4+is, vl), vl);
+ vuint8m1_t sc3 = __riscv_vrgather_vv_u8m1(aux, __riscv_vadd_vx_u8m1(v_b, 6+is, vl), vl);
+
+ vint16m2_t p0 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_0, sc0, vl));
+ vint16m2_t p1 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_1, sc1, vl));
+ vint16m2_t p2 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_2, sc2, vl));
+ vint16m2_t p3 = __riscv_vreinterpret_v_u16m2_i16m2(__riscv_vwmulu_vv_u16m2(q2_3, sc3, vl));
+
+ // load Q8
+ vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
+ vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
+ vint8m1_t q8_2 = __riscv_vle8_v_i8m1(q8+64, vl);
+ vint8m1_t q8_3 = __riscv_vle8_v_i8m1(q8+96, vl);
+
+ vint32m4_t s0 = __riscv_vwmul_vv_i32m4(p0, __riscv_vwcvt_x_x_v_i16m2(q8_0, vl), vl);
+ vint32m4_t s1 = __riscv_vwmul_vv_i32m4(p1, __riscv_vwcvt_x_x_v_i16m2(q8_1, vl), vl);
+ vint32m4_t s2 = __riscv_vwmul_vv_i32m4(p2, __riscv_vwcvt_x_x_v_i16m2(q8_2, vl), vl);
+ vint32m4_t s3 = __riscv_vwmul_vv_i32m4(p3, __riscv_vwcvt_x_x_v_i16m2(q8_3, vl), vl);
+
+ vint32m1_t isum0 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s0, s1, vl), vzero, vl);
+ vint32m1_t isum1 = __riscv_vredsum_vs_i32m4_i32m1(__riscv_vadd_vv_i32m4(s2, s3, vl), isum0, vl);
+
+ isum += __riscv_vmv_x_s_i32m1_i32(isum1);
+
+            q2 += 32; q8 += 128; is = 8;
+
+ }
+
+ sumf += dall * isum;
+
+ }
+
+ *s = sumf;
+
+#else
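+    // scalar version: each q2 byte packs four 2-bit quants (extracted at shifts 0/2/4/6); each
+    // scales byte packs a 4-bit sub-block scale in the low nibble and a 4-bit min in the high
+    // nibble, combined as dall * scale * dot(q2, q8) - dmin * min * bsum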
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * q2 = x[i].qs;
+ const int8_t * q8 = y[i].qs;
+ const uint8_t * sc = x[i].scales;
+
+ int summs = 0;
+ for (int j = 0; j < 16; ++j) {
+ summs += y[i].bsums[j] * (sc[j] >> 4);
+ }
+
+ const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ int isum = 0;
+ int is = 0;
+ int d;
+ for (int k = 0; k < QK_K/128; ++k) {
+ int shift = 0;
+ for (int j = 0; j < 4; ++j) {
+ d = sc[is++] & 0xF;
+ int isuml = 0;
+ for (int l = 0; l < 16; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
+ isum += d * isuml;
+ d = sc[is++] & 0xF;
+ isuml = 0;
+ for (int l = 16; l < 32; ++l) isuml += q8[l] * ((q2[l] >> shift) & 3);
+ isum += d * isuml;
+ shift += 2;
+ q8 += 32;
+ }
+ q2 += 32;
+ }
+ sumf += dall * isum - dmin * summs;
+ }
+ *s = sumf;
+#endif
+}
+
+#else
+
+void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+
+ const block_q2_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_NEON
+
+ const uint8x16_t m3 = vdupq_n_u8(0x3);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+
+ ggml_int8x16x4_t q2bytes;
+
+ uint32_t aux32[2];
+ const uint8_t * scales = (const uint8_t *)aux32;
+
+ float sum = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * (float)x[i].d;
+ const float dmin = -y[i].d * (float)x[i].dmin;
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+ const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
+
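+        // split the four packed scale bytes: the low nibbles become scales[0..3] (sub-block
+        // scales), the high nibbles become scales[4..7] (sub-block mins)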
+ aux32[0] = sc[0] & 0x0f0f0f0f;
+ aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
+
+ sum += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
+
+ int isum1 = 0, isum2 = 0;
+
+ const uint8x16_t q2bits = vld1q_u8(q2);
+
+ const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
+
+ q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits, m3));
+ q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 2), m3));
+ q2bytes.val[2] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 4), m3));
+ q2bytes.val[3] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits, 6), m3));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ isum1 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * scales[0];
+ isum2 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * scales[1];
+ isum1 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[2], q8bytes.val[2])) * scales[2];
+ isum2 += vaddvq_s32(vdotq_s32(vzero, q2bytes.val[3], q8bytes.val[3])) * scales[3];
+#else
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q2bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q2bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ isum1 += vaddvq_s16(p1) * scales[0];
+ isum2 += vaddvq_s16(p2) * scales[1];
+
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q2bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ const int16x8_t p4 = vaddq_s16(vmull_s8(vget_low_s8 (q2bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q2bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ isum1 += vaddvq_s16(p3) * scales[2];
+ isum2 += vaddvq_s16(p4) * scales[3];
+#endif
+ sum += d * (isum1 + isum2);
+
+ }
+
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m3 = _mm256_set1_epi8(3);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ uint32_t ud, um;
+ const uint8_t * restrict db = (const uint8_t *)&ud;
+ const uint8_t * restrict mb = (const uint8_t *)&um;
+
+ float summs = 0;
+
+ // TODO: optimize this
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
+ ud = (sc[0] >> 0) & 0x0f0f0f0f;
+ um = (sc[0] >> 4) & 0x0f0f0f0f;
+
+ int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
+ summs += dmin * smin;
+
+ const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
+ const __m256i q2_0 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 2), q2bits), m3);
+ const __m256i q2_1 = _mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q2bits, 6), _mm_srli_epi16(q2bits, 4)), m3);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m256i p0 = _mm256_maddubs_epi16(q2_0, q8_0);
+ const __m256i p1 = _mm256_maddubs_epi16(q2_1, q8_1);
+
+ const __m256i p_0 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 0));
+ const __m256i p_1 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p0, 1));
+ const __m256i p_2 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 0));
+ const __m256i p_3 = _mm256_cvtepi16_epi32(_mm256_extracti128_si256(p1, 1));
+
+ acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0), acc);
+ acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1), acc);
+ acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2), acc);
+ acc = _mm256_fmadd_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3), acc);
+ }
+
+ *s = hsum_float_8(acc) + summs;
+
+#elif defined __AVX__
+
+ const __m128i m3 = _mm_set1_epi8(3);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ uint32_t ud, um;
+ const uint8_t * restrict db = (const uint8_t *)&ud;
+ const uint8_t * restrict mb = (const uint8_t *)&um;
+
+ float summs = 0;
+
+ // TODO: optimize this
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
+ ud = (sc[0] >> 0) & 0x0f0f0f0f;
+ um = (sc[0] >> 4) & 0x0f0f0f0f;
+
+ int32_t smin = mb[0] * y[i].bsums[0] + mb[1] * y[i].bsums[1] + mb[2] * y[i].bsums[2] + mb[3] * y[i].bsums[3];
+ summs += dmin * smin;
+
+ const __m128i q2bits = _mm_loadu_si128((const __m128i*)q2);
+ const __m128i q2_0 = _mm_and_si128(q2bits, m3);
+ const __m128i q2_1 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
+ const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
+ const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m128i p0 = _mm_maddubs_epi16(q2_0, _mm256_extractf128_si256(q8_0, 0));
+ const __m128i p1 = _mm_maddubs_epi16(q2_1, _mm256_extractf128_si256(q8_0, 1));
+ const __m128i p2 = _mm_maddubs_epi16(q2_2, _mm256_extractf128_si256(q8_1, 0));
+ const __m128i p3 = _mm_maddubs_epi16(q2_3, _mm256_extractf128_si256(q8_1, 1));
+
+ const __m256i p_0 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p0, p0)), _mm_cvtepi16_epi32(p0));
+ const __m256i p_1 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p1, p1)), _mm_cvtepi16_epi32(p1));
+ const __m256i p_2 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p2, p2)), _mm_cvtepi16_epi32(p2));
+ const __m256i p_3 = MM256_SET_M128I(_mm_cvtepi16_epi32(_mm_unpackhi_epi64(p3, p3)), _mm_cvtepi16_epi32(p3));
+
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[0]), _mm256_cvtepi32_ps(p_0)), acc);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[1]), _mm256_cvtepi32_ps(p_1)), acc);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[2]), _mm256_cvtepi32_ps(p_2)), acc);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d * db[3]), _mm256_cvtepi32_ps(p_3)), acc);
+ }
+
+ *s = hsum_float_8(acc) + summs;
+
+#elif defined __riscv_v_intrinsic
+
+ uint32_t aux32[2];
+ const uint8_t * scales = (const uint8_t *)aux32;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * (float)x[i].d;
+ const float dmin = -y[i].d * (float)x[i].dmin;
+
+ const uint8_t * restrict q2 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+ const uint32_t * restrict sc = (const uint32_t *)x[i].scales;
+
+ aux32[0] = sc[0] & 0x0f0f0f0f;
+ aux32[1] = (sc[0] >> 4) & 0x0f0f0f0f;
+
+ sumf += dmin * (scales[4] * y[i].bsums[0] + scales[5] * y[i].bsums[1] + scales[6] * y[i].bsums[2] + scales[7] * y[i].bsums[3]);
+
+ int isum1 = 0;
+ int isum2 = 0;
+
+ size_t vl = 16;
+
+ vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
+
+ // load Q2
+ vuint8mf2_t q2_x = __riscv_vle8_v_u8mf2(q2, vl);
+
+ vint8mf2_t q2_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q2_x, 0x03, vl));
+ vint8mf2_t q2_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x2, vl), 0x03 , vl));
+ vint8mf2_t q2_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x4, vl), 0x03 , vl));
+ vint8mf2_t q2_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q2_x, 0x6, vl), 0x03 , vl));
+
+ // load Q8, and take product with Q2
+ vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q2_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
+ vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q2_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
+ vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q2_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
+ vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q2_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
+
+ vint16m1_t vs_0 = __riscv_vredsum_vs_i16m1_i16m1(p0, vzero, vl);
+ vint16m1_t vs_1 = __riscv_vredsum_vs_i16m1_i16m1(p1, vzero, vl);
+ vint16m1_t vs_2 = __riscv_vredsum_vs_i16m1_i16m1(p2, vzero, vl);
+ vint16m1_t vs_3 = __riscv_vredsum_vs_i16m1_i16m1(p3, vzero, vl);
+
+ isum1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[0];
+ isum2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[1];
+ isum1 += __riscv_vmv_x_s_i16m1_i16(vs_2) * scales[2];
+ isum2 += __riscv_vmv_x_s_i16m1_i16(vs_3) * scales[3];
+
+ sumf += d * (isum1 + isum2);
+
+ }
+
+ *s = sumf;
+
+#else
+
+ float sumf = 0;
+
+ int isum[4];
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * q2 = x[i].qs;
+ const int8_t * q8 = y[i].qs;
+ const uint8_t * sc = x[i].scales;
+
+ int summs = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ summs += y[i].bsums[j] * (sc[j] >> 4);
+ }
+
+ const float dall = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ isum[0] = isum[1] = isum[2] = isum[3] = 0;
+ for (int l = 0; l < 16; ++l) {
+ isum[0] += q8[l+ 0] * ((q2[l] >> 0) & 3);
+ isum[1] += q8[l+16] * ((q2[l] >> 2) & 3);
+ isum[2] += q8[l+32] * ((q2[l] >> 4) & 3);
+ isum[3] += q8[l+48] * ((q2[l] >> 6) & 3);
+ }
+ for (int l = 0; l < 4; ++l) {
+ isum[l] *= (sc[l] & 0xF);
+ }
+ sumf += dall * (isum[0] + isum[1] + isum[2] + isum[3]) - dmin * summs;
+ }
+ *s = sumf;
+#endif
+}
+#endif
+
+#if QK_K == 256
+void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const uint32_t kmask1 = 0x03030303;
+ const uint32_t kmask2 = 0x0f0f0f0f;
+
+ const block_q3_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_NEON
+
+ uint32_t aux[3];
+ uint32_t utmp[4];
+
+ const uint8x16_t m3b = vdupq_n_u8(0x3);
+#ifdef __ARM_FEATURE_DOTPROD
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+
+ const uint8x16_t m0 = vdupq_n_u8(1);
+ const uint8x16_t m1 = vshlq_n_u8(m0, 1);
+ const uint8x16_t m2 = vshlq_n_u8(m0, 2);
+ const uint8x16_t m3 = vshlq_n_u8(m0, 3);
+ const int8_t m32 = 32;
+
+ ggml_int8x16x4_t q3bytes;
+
+ float sum = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const uint8_t * restrict qh = x[i].hmask;
+ const int8_t * restrict q8 = y[i].qs;
+
+ ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
+
+ ggml_uint8x16x4_t q3h;
+
+ int32_t isum = 0;
+
+ // Set up scales
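+        // (the 16 six-bit scales are packed into 12 bytes: the low 4 bits live in aux[0..1], the
+        // high 2 bits in aux[2]; after reassembly they are recentered by subtracting 32)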
+ memcpy(aux, x[i].scales, 12);
+ utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
+ utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
+ utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
+ utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
+
+ int8_t * scale = (int8_t *)utmp;
+ for (int j = 0; j < 16; ++j) scale[j] -= m32;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32;
+ const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64;
+ const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64;
+
+ q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
+ q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
+ q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
+ q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);
+
+ q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
+ q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
+ q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
+ q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];
+#else
+ int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes_1.val[0])),
+ vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes_1.val[0])));
+ int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes_1.val[1])),
+ vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes_1.val[1])));
+ int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes_1.val[2])),
+ vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes_1.val[2])));
+ int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes_1.val[3])),
+ vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes_1.val[3])));
+ isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1] + vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3];
+#endif
+ scale += 4;
+
+ q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
+ q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
+ q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
+ q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);
+
+ q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
+ q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
+ q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
+ q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];
+#else
+ p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes_2.val[0])),
+ vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes_2.val[0])));
+ p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes_2.val[1])),
+ vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes_2.val[1])));
+ p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes_2.val[2])),
+ vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes_2.val[2])));
+ p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes_2.val[3])),
+ vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes_2.val[3])));
+ isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1] + vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3];
+#endif
+ scale += 4;
+
+ if (j == 0) {
+ qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
+ qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
+ }
+
+ }
+ sum += d * isum;
+
+ }
+
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m3 = _mm256_set1_epi8(3);
+ const __m256i mone = _mm256_set1_epi8(1);
+ const __m128i m32 = _mm_set1_epi8(32);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ uint32_t aux[3];
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ // Set up scales
+ memcpy(aux, x[i].scales, 12);
+ __m128i scales128 = _mm_set_epi32(
+ ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
+ ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
+ (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
+ (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
+ scales128 = _mm_sub_epi8(scales128, m32);
+ const __m256i all_scales = _mm256_cvtepi8_epi16(scales128);
+ const __m128i l_scales = _mm256_extracti128_si256(all_scales, 0);
+ const __m128i h_scales = _mm256_extracti128_si256(all_scales, 1);
+ const __m256i scales[2] = {MM256_SET_M128I(l_scales, l_scales), MM256_SET_M128I(h_scales, h_scales)};
+
+ // high bit
+ const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].hmask);
+
+ // integer accumulator
+ __m256i sumi = _mm256_setzero_si256();
+
+ int bit = 0;
+ int is = 0;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+ // load low 2 bits
+ const __m256i q3bits = _mm256_loadu_si256((const __m256i*)q3); q3 += 32;
+
+ // prepare low and high bits
+ const __m256i q3l_0 = _mm256_and_si256(q3bits, m3);
+ const __m256i q3h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
+ ++bit;
+
+ const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3);
+ const __m256i q3h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
+ ++bit;
+
+ const __m256i q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3);
+ const __m256i q3h_2 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
+ ++bit;
+
+ const __m256i q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3);
+ const __m256i q3h_3 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, bit)), bit), 2);
+ ++bit;
+
+ // load Q8 quants
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+
+            // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16,
+            // and then subtract. The high bit part already carries the offset to subtract: it is 4 if the high bit was
+            // not set and 0 if it was set, so q3l - q3h gives the signed quant value (see the scalar version below)
+ __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
+ __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
+ __m256i q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2);
+ __m256i q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3);
+
+ __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
+ __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
+ __m256i p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2);
+ __m256i p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3);
+
+ p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
+
+ // multiply with scales
+ p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 0)), p16_0);
+ p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 1)), p16_1);
+ p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 2)), p16_2);
+ p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scales[j], get_scale_shuffle_q3k(is + 3)), p16_3);
+
+ // accumulate
+ p16_0 = _mm256_add_epi32(p16_0, p16_1);
+ p16_2 = _mm256_add_epi32(p16_2, p16_3);
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2));
+
+ }
+
+ // multiply with block scale and accumulate
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m3 = _mm_set1_epi8(3);
+ const __m128i mone = _mm_set1_epi8(1);
+ const __m128i m32 = _mm_set1_epi8(32);
+ const __m128i m2 = _mm_set1_epi8(2);
+
+ __m256 acc = _mm256_setzero_ps();
+
+    const uint32_t * aux;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ // Set up scales
+ aux = (const uint32_t *)x[i].scales;
+ __m128i scales128 = _mm_set_epi32(
+ ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
+ ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
+ (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
+ (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
+ scales128 = _mm_sub_epi8(scales128, m32);
+ const __m128i scales_0 = _mm_cvtepi8_epi16(scales128);
+ const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128));
+ const __m128i scales[2] = { scales_0, scales_1 };
+
+ // high bit *128*2 from block_q3_K.hmask[QK_K/8]
+ const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]);
+ const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]);
+
+ // integer accumulator
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
+ for (int j = 0; j < QK_K/128; ++j) {
+ // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4]
+ const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
+ const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
+
+ // prepare low and high bits
+ const int bit = j << 2;
+
+ const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3);
+ const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3);
+ const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2);
+ const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2);
+
+ const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3);
+ const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3);
+ const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
+ const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
+
+ const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3);
+ const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3);
+ const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
+ const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
+
+ const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3);
+ const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3);
+ const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
+ const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
+
+ // load Q8 quants from block_q8_K.qs[QK_K]
+ const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+
+            // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm_maddubs_epi16,
+            // and then subtract. The high bit part already carries the offset to subtract: it is 4 if the high bit was
+            // not set and 0 if it was set, so q3l - q3h gives the signed quant value (see the scalar version below)
+ __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0);
+ __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1);
+ __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2);
+ __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3);
+ __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4);
+ __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5);
+ __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6);
+ __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7);
+
+ __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0);
+ __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1);
+ __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2);
+ __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3);
+ __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4);
+ __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5);
+ __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6);
+ __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7);
+
+ p16_0 = _mm_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm_sub_epi16(p16_3, q8s_3);
+ p16_4 = _mm_sub_epi16(p16_4, q8s_4);
+ p16_5 = _mm_sub_epi16(p16_5, q8s_5);
+ p16_6 = _mm_sub_epi16(p16_6, q8s_6);
+ p16_7 = _mm_sub_epi16(p16_7, q8s_7);
+
+ // multiply with scales
+ __m128i shuffle = _mm_set1_epi16(0x0100);
+ p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7);
+
+ // accumulate
+ p16_0 = _mm_add_epi32(p16_0, p16_1);
+ p16_2 = _mm_add_epi32(p16_2, p16_3);
+ p16_4 = _mm_add_epi32(p16_4, p16_5);
+ p16_6 = _mm_add_epi32(p16_6, p16_7);
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6));
+
+ }
+
+ // multiply with block scale and accumulate
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ uint32_t aux[3];
+ uint32_t utmp[4];
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const uint8_t * restrict qh = x[i].hmask;
+ const int8_t * restrict q8 = y[i].qs;
+
+ memcpy(aux, x[i].scales, 12);
+ utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
+ utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
+ utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
+ utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
+
+ int8_t * scale = (int8_t *)utmp;
+ for (int j = 0; j < 16; ++j) scale[j] -= 32;
+
+
+ size_t vl = 32;
+ uint8_t m = 1;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+ vuint8m1_t vqh = __riscv_vle8_v_u8m1(qh, vl);
+
+ int sum_t = 0;
+
+ for (int j = 0; j < QK_K; j += 128) {
+
+ vl = 32;
+
+ // load Q3
+ vuint8m1_t q3_x = __riscv_vle8_v_u8m1(q3, vl);
+
+ vint8m1_t q3_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q3_x, 0x03, vl));
+ vint8m1_t q3_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x2, vl), 0x03 , vl));
+ vint8m1_t q3_2 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x4, vl), 0x03 , vl));
+ vint8m1_t q3_3 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(q3_x, 0x6, vl), 0x03 , vl));
+
+ // compute mask for subtraction
+ vuint8m1_t qh_m0 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_0 = __riscv_vmseq_vx_u8m1_b8(qh_m0, 0, vl);
+ vint8m1_t q3_m0 = __riscv_vsub_vx_i8m1_m(vmask_0, q3_0, 0x4, vl);
+ m <<= 1;
+
+ vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_1 = __riscv_vmseq_vx_u8m1_b8(qh_m1, 0, vl);
+ vint8m1_t q3_m1 = __riscv_vsub_vx_i8m1_m(vmask_1, q3_1, 0x4, vl);
+ m <<= 1;
+
+ vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_2 = __riscv_vmseq_vx_u8m1_b8(qh_m2, 0, vl);
+ vint8m1_t q3_m2 = __riscv_vsub_vx_i8m1_m(vmask_2, q3_2, 0x4, vl);
+ m <<= 1;
+
+ vuint8m1_t qh_m3 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_3 = __riscv_vmseq_vx_u8m1_b8(qh_m3, 0, vl);
+ vint8m1_t q3_m3 = __riscv_vsub_vx_i8m1_m(vmask_3, q3_3, 0x4, vl);
+ m <<= 1;
+
+ // load Q8 and take product with Q3
+ vint16m2_t a0 = __riscv_vwmul_vv_i16m2(q3_m0, __riscv_vle8_v_i8m1(q8, vl), vl);
+ vint16m2_t a1 = __riscv_vwmul_vv_i16m2(q3_m1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
+ vint16m2_t a2 = __riscv_vwmul_vv_i16m2(q3_m2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
+ vint16m2_t a3 = __riscv_vwmul_vv_i16m2(q3_m3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
+
+ vl = 16;
+
+            // retrieve lane to multiply with scale
+ vint32m2_t aux0_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 0), (scale[0]), vl);
+ vint32m2_t aux0_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a0, 1), (scale[1]), vl);
+ vint32m2_t aux1_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 0), (scale[2]), vl);
+ vint32m2_t aux1_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a1, 1), (scale[3]), vl);
+ vint32m2_t aux2_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 0), (scale[4]), vl);
+ vint32m2_t aux2_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a2, 1), (scale[5]), vl);
+ vint32m2_t aux3_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 0), (scale[6]), vl);
+ vint32m2_t aux3_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(a3, 1), (scale[7]), vl);
+
+ vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux0_0, aux0_1, vl), vzero, vl);
+ vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux1_0, aux1_1, vl), isum0, vl);
+ vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux2_0, aux2_1, vl), isum1, vl);
+ vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(aux3_0, aux3_1, vl), isum2, vl);
+
+ sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
+
+ q3 += 32; q8 += 128; scale += 8;
+
+ }
+
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+
+ sumf += d*sum_t;
+
+ }
+
+ *s = sumf;
+
+#else
+    // scalar version
+    // This function is written this way so that the compiler can vectorize most of it.
+    // With -Ofast, GCC and clang produce code that is within a factor of 2 or so of the
+    // manually vectorized version above. Every other version I tried ran at least 4 times slower.
+    // Ideally we could write the code once and the compiler would automatically produce the
+    // best possible set of machine instructions, instead of us having to manually write
+    // vectorized versions for AVX, ARM_NEON, etc.
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ memset(sums, 0, 8*sizeof(float));
+
+ uint32_t auxs[4];
+ const int8_t * scales = (const int8_t*)auxs;
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q3 = x[i].qs;
+ const uint8_t * restrict hm = x[i].hmask;
+ const int8_t * restrict q8 = y[i].qs;
+ memset(aux32, 0, 8*sizeof(int32_t));
+ int8_t * restrict a = aux8;
+ uint8_t m = 1;
+ for (int j = 0; j < QK_K; j += 128) {
+ for (int l = 0; l < 32; ++l) a[l] = q3[l] & 3;
+ for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+ a += 32; m <<= 1;
+ for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 2) & 3;
+ for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+ a += 32; m <<= 1;
+ for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 4) & 3;
+ for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+ a += 32; m <<= 1;
+ for (int l = 0; l < 32; ++l) a[l] = (q3[l] >> 6) & 3;
+ for (int l = 0; l < 32; ++l) a[l] -= (hm[l] & m ? 0 : 4);
+ a += 32; m <<= 1;
+ q3 += 32;
+ }
+ a = aux8;
+
+ memcpy(auxs, x[i].scales, 12);
+ uint32_t tmp = auxs[2];
+ auxs[2] = ((auxs[0] >> 4) & kmask2) | (((tmp >> 4) & kmask1) << 4);
+ auxs[3] = ((auxs[1] >> 4) & kmask2) | (((tmp >> 6) & kmask1) << 4);
+ auxs[0] = (auxs[0] & kmask2) | (((tmp >> 0) & kmask1) << 4);
+ auxs[1] = (auxs[1] & kmask2) | (((tmp >> 2) & kmask1) << 4);
+ for (int j = 0; j < QK_K/16; ++j) {
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += (scales[j] - 32) * aux16[l];
+ q8 += 8; a += 8;
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+
+#endif
+
+}
+
+#else
+
+void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q3_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_NEON
+
+#ifdef __ARM_FEATURE_DOTPROD
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+
+ const uint8x16_t m3b = vdupq_n_u8(0x3);
+ const uint8x16_t mh = vdupq_n_u8(4);
+
+ ggml_int8x16x4_t q3bytes;
+
+ uint16_t aux16[2];
+ int8_t * scales = (int8_t *)aux16;
+
+ float sum = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ ggml_uint8x16x4_t q3h;
+
+ const uint8x8_t hbits = vld1_u8(x[i].hmask);
+ const uint8x16_t q3bits = vld1q_u8(x[i].qs);
+ const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(y[i].qs);
+
+ const uint16_t a = *(const uint16_t *)x[i].scales;
+ aux16[0] = a & 0x0f0f;
+ aux16[1] = (a >> 4) & 0x0f0f;
+
+ for (int j = 0; j < 4; ++j) scales[j] -= 8;
+
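+        // the stored quants decode to (low 2 bits | high bit << 2) - 4; the constant -4 is folded
+        // in here through the per-16 bsums, and the scale order 0,2,1,3 matches the nibble layout
+        // used below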
+ int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
+
+ const float d = y[i].d * (float)x[i].d;
+
+ const uint8x16_t htmp = vcombine_u8(hbits, vshr_n_u8(hbits, 1));
+ q3h.val[0] = vandq_u8(mh, vshlq_n_u8(htmp, 2));
+ q3h.val[1] = vandq_u8(mh, htmp);
+ q3h.val[2] = vandq_u8(mh, vshrq_n_u8(htmp, 2));
+ q3h.val[3] = vandq_u8(mh, vshrq_n_u8(htmp, 4));
+
+ q3bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q3bits, m3b), q3h.val[0]));
+ q3bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 2), m3b), q3h.val[1]));
+ q3bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(vshrq_n_u8(q3bits, 4), m3b), q3h.val[2]));
+ q3bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q3bits, 6), q3h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[0], q8bytes.val[0])) * scales[0];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[1], q8bytes.val[1])) * scales[2];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[2], q8bytes.val[2])) * scales[1];
+ isum += vaddvq_s32(vdotq_s32(vzero, q3bytes.val[3], q8bytes.val[3])) * scales[3];
+#else
+ const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q3bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q3bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q3bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q3bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q3bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ isum += vaddvq_s16(p0) * scales[0] + vaddvq_s16(p1) * scales[2] + vaddvq_s16(p2) * scales[1] + vaddvq_s16(p3) * scales[3];
+#endif
+
+ sum += d * isum;
+
+ }
+
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m3 = _mm256_set1_epi8(3);
+ const __m256i m1 = _mm256_set1_epi8(1);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ uint64_t aux64;
+
+ uint16_t aux16[2];
+ const int8_t * aux8 = (const int8_t *)aux16;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint16_t a = *(const uint16_t *)x[i].scales;
+ aux16[0] = a & 0x0f0f;
+ aux16[1] = (a >> 4) & 0x0f0f;
+
+ const __m256i scale_0 = MM256_SET_M128I(_mm_set1_epi16(aux8[2] - 8), _mm_set1_epi16(aux8[0] - 8));
+ const __m256i scale_1 = MM256_SET_M128I(_mm_set1_epi16(aux8[3] - 8), _mm_set1_epi16(aux8[1] - 8));
+
+ memcpy(&aux64, x[i].hmask, 8);
+
+ const __m128i haux = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
+ __m256i q3h_0 = MM256_SET_M128I(_mm_srli_epi16(haux, 2), haux);
+ __m256i q3h_1 = _mm256_srli_epi16(q3h_0, 4);
+ q3h_0 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_0, m1), 2);
+ q3h_1 = _mm256_slli_epi16(_mm256_andnot_si256(q3h_1, m1), 2);
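+        // aux64 and aux64 >> 1 occupy the two 8-byte halves of haux, exposing high-bit planes 0
+        // and 1; the extra 2-bit and 4-bit shifts reach planes 2..7, and andnot with m1 followed
+        // by slli(..., 2) turns a clear high bit into the 4 that is subtracted below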
+
+ // load low 2 bits
+ const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
+
+ // prepare low and high bits
+ const __m256i q3aux = MM256_SET_M128I(_mm_srli_epi16(q3bits, 2), q3bits);
+ const __m256i q3l_0 = _mm256_and_si256(q3aux, m3);
+ const __m256i q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3aux, 4), m3);
+
+ // load Q8 quants
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+        // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16,
+        // and then subtract. The high bit part already carries the offset to subtract: it is 4 if the high bit was
+        // not set and 0 if it was set, so q3l - q3h gives the signed quant value (see the scalar version below)
+ const __m256i q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0);
+ const __m256i q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1);
+
+ __m256i p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0);
+ __m256i p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1);
+
+ p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
+
+ // multiply with scales
+ p16_0 = _mm256_madd_epi16(scale_0, p16_0);
+ p16_1 = _mm256_madd_epi16(scale_1, p16_1);
+
+ p16_0 = _mm256_add_epi32(p16_0, p16_1);
+
+ // multiply with block scale and accumulate
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16_0), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m3 = _mm_set1_epi8(3);
+ const __m128i m1 = _mm_set1_epi8(1);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ uint64_t aux64;
+
+ uint16_t aux16[2];
+ const int8_t * aux8 = (const int8_t *)aux16;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint16_t a = *(const uint16_t *)x[i].scales;
+ aux16[0] = a & 0x0f0f;
+ aux16[1] = (a >> 4) & 0x0f0f;
+
+ const __m128i scale_0 = _mm_set1_epi16(aux8[0] - 8);
+ const __m128i scale_1 = _mm_set1_epi16(aux8[2] - 8);
+ const __m128i scale_2 = _mm_set1_epi16(aux8[1] - 8);
+ const __m128i scale_3 = _mm_set1_epi16(aux8[3] - 8);
+
+ memcpy(&aux64, x[i].hmask, 8);
+
+ __m128i q3h_0 = _mm_set_epi64x(aux64 >> 1, aux64 >> 0);
+ __m128i q3h_1 = _mm_srli_epi16(q3h_0, 2);
+ __m128i q3h_2 = _mm_srli_epi16(q3h_0, 4);
+ __m128i q3h_3 = _mm_srli_epi16(q3h_0, 6);
+ q3h_0 = _mm_slli_epi16(_mm_andnot_si128(q3h_0, m1), 2);
+ q3h_1 = _mm_slli_epi16(_mm_andnot_si128(q3h_1, m1), 2);
+ q3h_2 = _mm_slli_epi16(_mm_andnot_si128(q3h_2, m1), 2);
+ q3h_3 = _mm_slli_epi16(_mm_andnot_si128(q3h_3, m1), 2);
+
+ // load low 2 bits
+ const __m128i q3bits = _mm_loadu_si128((const __m128i*)q3);
+
+ // prepare low and high bits
+ const __m128i q3l_0 = _mm_and_si128(q3bits, m3);
+ const __m128i q3l_1 = _mm_and_si128(_mm_srli_epi16(q3bits, 2), m3);
+ const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits, 4), m3);
+ const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits, 6), m3);
+
+ // load Q8 quants
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+        // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm_maddubs_epi16,
+        // and then subtract. The high bit part already carries the offset to subtract: it is 4 if the high bit was
+        // not set and 0 if it was set, so q3l - q3h gives the signed quant value (see the scalar version below)
+ const __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, _mm256_extractf128_si256(q8_0, 0));
+ const __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, _mm256_extractf128_si256(q8_0, 1));
+ const __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, _mm256_extractf128_si256(q8_1, 0));
+ const __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, _mm256_extractf128_si256(q8_1, 1));
+
+ __m128i p16_0 = _mm_maddubs_epi16(q3l_0, _mm256_extractf128_si256(q8_0, 0));
+ __m128i p16_1 = _mm_maddubs_epi16(q3l_1, _mm256_extractf128_si256(q8_0, 1));
+ __m128i p16_2 = _mm_maddubs_epi16(q3l_2, _mm256_extractf128_si256(q8_1, 0));
+ __m128i p16_3 = _mm_maddubs_epi16(q3l_3, _mm256_extractf128_si256(q8_1, 1));
+
+ p16_0 = _mm_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm_sub_epi16(p16_3, q8s_3);
+
+ // multiply with scales
+ p16_0 = _mm_madd_epi16(scale_0, p16_0);
+ p16_1 = _mm_madd_epi16(scale_1, p16_1);
+ p16_2 = _mm_madd_epi16(scale_2, p16_2);
+ p16_3 = _mm_madd_epi16(scale_3, p16_3);
+
+ p16_0 = _mm_add_epi32(p16_0, p16_2);
+ p16_1 = _mm_add_epi32(p16_1, p16_3);
+ __m256i p16 = MM256_SET_M128I(p16_1, p16_0);
+
+ // multiply with block scale and accumulate
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(p16)), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ uint16_t aux16[2];
+ int8_t * scales = (int8_t *)aux16;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q3 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint16_t a = *(const uint16_t *)x[i].scales;
+ aux16[0] = a & 0x0f0f;
+ aux16[1] = (a >> 4) & 0x0f0f;
+
+ for (int j = 0; j < 4; ++j) scales[j] -= 8;
+
+ int32_t isum = -4*(scales[0] * y[i].bsums[0] + scales[2] * y[i].bsums[1] + scales[1] * y[i].bsums[2] + scales[3] * y[i].bsums[3]);
+
+ const float d = y[i].d * (float)x[i].d;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+
+ // load qh
+ vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(x[i].hmask, 8);
+ vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
+
+ size_t vl = 16;
+
+ // extend and combine both qh_x1 and qh_x2
+ vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
+
+ vuint8mf2_t qh_0 = __riscv_vand_vx_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
+ vuint8mf2_t qh_1 = __riscv_vand_vx_u8mf2(qh_x, 0x4, vl);
+ vuint8mf2_t qh_2 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl), 0x4, vl);
+ vuint8mf2_t qh_3 = __riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), 0x4, vl);
+
+ // load Q3
+ vuint8mf2_t q3_x = __riscv_vle8_v_u8mf2(q3, vl);
+
+ vuint8mf2_t q3h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q3_x, 0x3, vl), qh_0, vl);
+ vuint8mf2_t q3h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 2, vl), 0x3, vl), qh_1, vl);
+ vuint8mf2_t q3h_2 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 4, vl), 0x3, vl), qh_2, vl);
+ vuint8mf2_t q3h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q3_x, 0x6, vl), qh_3, vl);
+
+ vint8mf2_t q3_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_0);
+ vint8mf2_t q3_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_1);
+ vint8mf2_t q3_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_2);
+ vint8mf2_t q3_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(q3h_3);
+
+ // load Q8 and take product with Q3
+ vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q3_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
+ vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q3_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
+ vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q3_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
+ vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q3_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
+
+ vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
+ vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
+ vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
+ vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
+
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scales[0];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scales[2];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scales[1];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scales[3];
+
+ sumf += d * isum;
+
+ }
+
+ *s = sumf;
+
+#else
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ int32_t scales[4];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q3 = x[i].qs;
+ const uint8_t * restrict hm = x[i].hmask;
+ const int8_t * restrict q8 = y[i].qs;
+ int8_t * restrict a = aux8;
+ for (int l = 0; l < 8; ++l) {
+ a[l+ 0] = (int8_t)((q3[l+0] >> 0) & 3) - (hm[l] & 0x01 ? 0 : 4);
+ a[l+ 8] = (int8_t)((q3[l+8] >> 0) & 3) - (hm[l] & 0x02 ? 0 : 4);
+ a[l+16] = (int8_t)((q3[l+0] >> 2) & 3) - (hm[l] & 0x04 ? 0 : 4);
+ a[l+24] = (int8_t)((q3[l+8] >> 2) & 3) - (hm[l] & 0x08 ? 0 : 4);
+ a[l+32] = (int8_t)((q3[l+0] >> 4) & 3) - (hm[l] & 0x10 ? 0 : 4);
+ a[l+40] = (int8_t)((q3[l+8] >> 4) & 3) - (hm[l] & 0x20 ? 0 : 4);
+ a[l+48] = (int8_t)((q3[l+0] >> 6) & 3) - (hm[l] & 0x40 ? 0 : 4);
+ a[l+56] = (int8_t)((q3[l+8] >> 6) & 3) - (hm[l] & 0x80 ? 0 : 4);
+ }
+
+ scales[0] = (x[i].scales[0] & 0xF) - 8;
+ scales[1] = (x[i].scales[0] >> 4) - 8;
+ scales[2] = (x[i].scales[1] & 0xF) - 8;
+ scales[3] = (x[i].scales[1] >> 4) - 8;
+
+ memset(aux32, 0, 8*sizeof(int32_t));
+ for (int j = 0; j < QK_K/16; ++j) {
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] += q8[l] * a[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux32[l] += scales[j] * aux16[l];
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+
+#endif
+
+}
+#endif
+
+#if QK_K == 256
+void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q4_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+ static const uint32_t kmask1 = 0x3f3f3f3f;
+ static const uint32_t kmask2 = 0x0f0f0f0f;
+ static const uint32_t kmask3 = 0x03030303;
+
+ uint32_t utmp[4];
+
+#ifdef __ARM_NEON
+
+ const uint8x16_t m4b = vdupq_n_u8(0xf);
+#ifdef __ARM_FEATURE_DOTPROD
+ const int32x4_t mzero = vdupq_n_s32(0);
+#endif
+
+ ggml_int8x16x2_t q4bytes;
+ ggml_int8x16x2_t q8bytes;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
+
+ memcpy(utmp, x[i].scales, 12);
+
+ uint32x2_t mins8 = { 0 };
+ mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0);
+ mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1);
+
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[0] &= kmask1;
+
+ const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
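+        // dot the eight mins with the pair-summed q8 block sums: this constant
+        // term is subtracted once, scaled by dmin, instead of per element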
+ const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
+ vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
+ sumf -= dmin * vaddvq_s32(prod);
+
+ const uint8_t * scales = (const uint8_t *)utmp;
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ int32_t sumi1 = 0;
+ int32_t sumi2 = 0;
+
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;
+
+#ifdef __ARM_FEATURE_DOTPROD
+ q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
+ q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
+
+ const int32x4_t p1 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
+ sumi1 += vaddvq_s32(p1) * scales[2*j+0];
+
+ q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
+ q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
+
+ const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
+
+ sumi2 += vaddvq_s32(p2) * scales[2*j+1];
+#else
+ q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
+ q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
+ const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ sumi1 += vaddvq_s16(vaddq_s16(p0, p1)) * scales[2*j+0];
+
+ q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
+ q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ sumi2 += vaddvq_s16(vaddq_s16(p2, p3)) * scales[2*j+1];
+
+#endif
+ }
+
+ sumf += d * (sumi1 + sumi2);
+
+ }
+
+ *s = sumf;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+
+ __m256 acc = _mm256_setzero_ps();
+ __m128 acc_m = _mm_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
+
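+        // the upper 128 bits of mins_and_scales hold the eight mins; dot them
+        // with the pair-summed q8 bsums and fold into acc_m (dmin was negated
+        // above, so this subtracts the mins contribution)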
+ const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
+ const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
+ const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
+ acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m);
+
+ const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
+ const __m256i scales = MM256_SET_M128I(sc128, sc128);
+
+ __m256i sumi = _mm256_setzero_si256();
+
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const __m256i scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
+ const __m256i scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
+
+ const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
+ const __m256i q4l = _mm256_and_si256(q4bits, m4);
+ const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
+
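+            // maddubs multiplies unsigned q4 nibbles with signed q8 bytes and
+            // horizontally adds adjacent pairs into 16-bit lanes; madd then
+            // applies the sub-block scale and widens to 32 bits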
+ const __m256i q8l = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
+ p16l = _mm256_madd_epi16(scale_l, p16l);
+
+ const __m256i q8h = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
+ p16h = _mm256_madd_epi16(scale_h, p16h);
+ const __m256i sumj = _mm256_add_epi32(p16l, p16h);
+
+ sumi = _mm256_add_epi32(sumi, sumj);
+ }
+
+ __m256 vd = _mm256_set1_ps(d);
+ acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
+
+ }
+
+ acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
+ acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
+
+ *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i m2 = _mm_set1_epi8(0x2);
+
+ __m256 acc = _mm256_setzero_ps();
+ __m128 acc_m = _mm_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
+ const __m128i scales = _mm_cvtepu8_epi16(utmps);
+ const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
+
+ const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
+ const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
+ const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
+ const __m128i prod = _mm_madd_epi16(mins, q8s);
+ acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m);
+
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
+ __m128i shuffle = _mm_set1_epi16(0x0100);
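+        // 0x0100 selects the first 16-bit scale replicated across all lanes;
+        // adding m2 to every byte index advances the shuffle to the next scale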
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi16(shuffle, m2);
+
+ __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+ const __m128i q4l_0 = _mm_and_si128(q4bits, m4);
+ const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
+ q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+ const __m128i q4l_1 = _mm_and_si128(q4bits, m4);
+ const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
+
+ const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0);
+ p16l = _mm_madd_epi16(scale_l, p16l);
+ sumi_0 = _mm_add_epi32(sumi_0, p16l);
+ const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ p16l = _mm_maddubs_epi16(q4l_1, q8l_1);
+ p16l = _mm_madd_epi16(scale_l, p16l);
+ sumi_1 = _mm_add_epi32(sumi_1, p16l);
+
+ const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0);
+ p16h = _mm_madd_epi16(scale_h, p16h);
+ sumi_0 = _mm_add_epi32(sumi_0, p16h);
+ const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ p16h = _mm_maddubs_epi16(q4h_1, q8h_1);
+ p16h = _mm_madd_epi16(scale_h, p16h);
+ sumi_1 = _mm_add_epi32(sumi_1, p16h);
+
+ }
+
+ __m256 vd = _mm256_set1_ps(d);
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+ acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
+
+ }
+
+ acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
+ acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));
+
+ *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);
+
+#elif defined __riscv_v_intrinsic
+
+ const uint8_t * scales = (const uint8_t*)&utmp[0];
+ const uint8_t * mins = (const uint8_t*)&utmp[2];
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ size_t vl = 8;
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
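+        // stride-4 loads split the even and odd bsums so that adjacent pairs
+        // are summed, producing one 16-bit total per 6-bit min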
+ vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
+ vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
+ vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
+ vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
+ vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
+
+ vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
+ sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ vl = 32;
+
+ int32_t sum_1 = 0;
+ int32_t sum_2 = 0;
+
+ vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
+
+ for (int j = 0; j < QK_K/64; ++j) {
+ // load Q4
+ vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
+
+ // load Q8 and multiply it with lower Q4 nibble
+ vint8m1_t q8_0 = __riscv_vle8_v_i8m1(q8, vl);
+ vint8m1_t q4_0 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
+ vint16m2_t qv_0 = __riscv_vwmul_vv_i16m2(q4_0, q8_0, vl);
+ vint16m1_t vs_0 = __riscv_vredsum_vs_i16m2_i16m1(qv_0, vzero, vl);
+
+ sum_1 += __riscv_vmv_x_s_i16m1_i16(vs_0) * scales[2*j+0];
+
+ // load Q8 and multiply it with upper Q4 nibble
+ vint8m1_t q8_1 = __riscv_vle8_v_i8m1(q8+32, vl);
+ vint8m1_t q4_1 = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
+ vint16m2_t qv_1 = __riscv_vwmul_vv_i16m2(q4_1, q8_1, vl);
+ vint16m1_t vs_1 = __riscv_vredsum_vs_i16m2_i16m1(qv_1, vzero, vl);
+
+ sum_2 += __riscv_vmv_x_s_i16m1_i16(vs_1) * scales[2*j+1];
+
+ q4 += 32; q8 += 64;
+
+ }
+
+ sumf += d*(sum_1 + sum_2);
+
+ }
+
+ *s = sumf;
+
+#else
+
+ const uint8_t * scales = (const uint8_t*)&utmp[0];
+ const uint8_t * mins = (const uint8_t*)&utmp[2];
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+ memset(aux32, 0, 8*sizeof(int32_t));
+ int8_t * restrict a = aux8;
+ for (int j = 0; j < QK_K/64; ++j) {
+ for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
+ a += 32;
+ for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
+ a += 32; q4 += 32;
+ }
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ int sumi = 0;
+ for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
+ a = aux8;
+ int is = 0;
+ for (int j = 0; j < QK_K/32; ++j) {
+ int32_t scale = scales[is++];
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
+ sumf -= dmin * sumi;
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+#else
+void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q4_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_NEON
+
+ const uint8x16_t m4b = vdupq_n_u8(0xf);
+
+#ifdef __ARM_FEATURE_DOTPROD
+ const int32x4_t mzero = vdupq_n_s32(0);
+#endif
+
+ float sumf = 0;
+
+ ggml_int8x16x2_t q4bytes;
+ ggml_int8x16x4_t q8bytes;
+
+ float sum_mins = 0.f;
+
+ uint16_t aux16[2];
+ const uint8_t * restrict scales = (const uint8_t *)aux16;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint16_t * restrict a = (const uint16_t *)x[i].scales;
+ aux16[0] = a[0] & 0x0f0f;
+ aux16[1] = (a[0] >> 4) & 0x0f0f;
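+        // the low nibbles of the packed scale bytes are the two sub-block
+        // scales (scales[0..1]); the high nibbles are the mins (scales[2..3])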
+
+ const int32_t summi = scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]);
+ sum_mins += y[i].d * (float)x[i].d[1] * summi;
+
+ const float d = y[i].d * (float)x[i].d[0];
+
+ const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4);
+
+#ifdef __ARM_FEATURE_DOTPROD
+ q8bytes = ggml_vld1q_s8_x4(q8);
+ q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
+
+ const int32x4_t p1 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
+ const int32_t sumi1 = vaddvq_s32(p1) * scales[0];
+
+ q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
+
+ const int32x4_t p2 = vdotq_s32(vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[2]), q4bytes.val[1], q8bytes.val[3]);
+ const int32_t sumi2 = vaddvq_s32(p2) * scales[1];
+
+#else
+ q8bytes = ggml_vld1q_s8_x4(q8);
+ q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
+ const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ int32_t sumi1 = vaddvq_s16(vaddq_s16(p0, p1)) * scales[0];
+
+ q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[0]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q4bytes.val[0]), vget_high_s8(q8bytes.val[2])));
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q4bytes.val[1]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q4bytes.val[1]), vget_high_s8(q8bytes.val[3])));
+ int32_t sumi2 = vaddvq_s16(vaddq_s16(p2, p3)) * scales[1];
+
+#endif
+ sumf += d * (sumi1 + sumi2);
+
+ }
+
+ *s = sumf - sum_mins;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0;
+
+ uint16_t aux16[2];
+ const uint8_t * scales = (const uint8_t *)aux16;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
+ const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
+ const __m256 vd = _mm256_set1_ps(d);
+
+ const uint16_t * a = (const uint16_t *)x[i].scales;
+ aux16[0] = a[0] & 0x0f0f;
+ aux16[1] = (a[0] >> 4) & 0x0f0f;
+
+ summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
+ const __m256i q4l = _mm256_and_si256(q4bits, m4);
+ const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4);
+
+ const __m256i q8l = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8h = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m256i p16l = _mm256_maddubs_epi16(q4l, q8l);
+ const __m256i p16h = _mm256_maddubs_epi16(q4h, q8h);
+
+ const __m256i p32l = _mm256_madd_epi16(_mm256_set1_epi16(scales[0]), p16l);
+ acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32l), acc);
+
+ const __m256i p32h = _mm256_madd_epi16(_mm256_set1_epi16(scales[1]), p16h);
+ acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(p32h), acc);
+
+ }
+
+ *s = hsum_float_8(acc) - summs;
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0;
+
+ uint16_t aux16[2];
+ const uint8_t * scales = (const uint8_t *)aux16;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d[0]) * y[i].d;
+ const float m = GGML_FP16_TO_FP32(x[i].d[1]) * y[i].d;
+ const __m256 vd = _mm256_set1_ps(d);
+
+ const uint16_t * a = (const uint16_t *)x[i].scales;
+ aux16[0] = a[0] & 0x0f0f;
+ aux16[1] = (a[0] >> 4) & 0x0f0f;
+
+ summs += m * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m256i q4bits = _mm256_loadu_si256((const __m256i*)q4);
+ const __m128i q4bits_0 = _mm256_extractf128_si256(q4bits, 0);
+ const __m128i q4bits_1 = _mm256_extractf128_si256(q4bits, 1);
+ const __m128i q4_0 = _mm_and_si128(q4bits_0, m4);
+ const __m128i q4_1 = _mm_and_si128(q4bits_1, m4);
+ const __m128i q4_2 = _mm_and_si128(_mm_srli_epi16(q4bits_0, 4), m4);
+ const __m128i q4_3 = _mm_and_si128(_mm_srli_epi16(q4bits_1, 4), m4);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
+ const __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
+ const __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
+ const __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
+
+ const __m128i p32_0 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_0);
+ const __m128i p32_1 = _mm_madd_epi16(_mm_set1_epi16(scales[0]), p16_1);
+ acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_1, p32_0))), acc);
+
+ const __m128i p32_2 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_2);
+ const __m128i p32_3 = _mm_madd_epi16(_mm_set1_epi16(scales[1]), p16_3);
+ acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(MM256_SET_M128I(p32_3, p32_2))), acc);
+
+ }
+
+ *s = hsum_float_8(acc) - summs;
+
+#elif defined __riscv_v_intrinsic
+
+ uint16_t s16[2];
+ const uint8_t * restrict scales = (const uint8_t *)s16;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint16_t * restrict b = (const uint16_t *)x[i].scales;
+ s16[0] = b[0] & 0x0f0f;
+ s16[1] = (b[0] >> 4) & 0x0f0f;
+
+ sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
+
+ size_t vl = 32;
+
+ vint16m1_t vzero = __riscv_vmv_v_x_i16m1(0, 1);
+
+ // load Q4
+ vuint8m1_t q4_x = __riscv_vle8_v_u8m1(q4, vl);
+
+ // load Q8 and multiply it with lower Q4 nibble
+ vint8m1_t q4_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q4_x, 0x0F, vl));
+ vint16m2_t va_0 = __riscv_vwmul_vv_i16m2(q4_a, __riscv_vle8_v_i8m1(q8, vl), vl);
+ vint16m1_t aux1 = __riscv_vredsum_vs_i16m2_i16m1(va_0, vzero, vl);
+
+ sumf += d*scales[0]*__riscv_vmv_x_s_i16m1_i16(aux1);
+
+ // load Q8 and multiply it with upper Q4 nibble
+ vint8m1_t q4_s = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q4_x, 0x04, vl));
+ vint16m2_t va_1 = __riscv_vwmul_vv_i16m2(q4_s, __riscv_vle8_v_i8m1(q8+32, vl), vl);
+ vint16m1_t aux2 = __riscv_vredsum_vs_i16m2_i16m1(va_1, vzero, vl);
+
+ sumf += d*scales[1]*__riscv_vmv_x_s_i16m1_i16(aux2);
+
+ }
+
+ *s = sumf;
+
+#else
+
+ uint8_t aux8[QK_K];
+ int16_t aux16[16];
+ float sums [8];
+ memset(sums, 0, 8*sizeof(float));
+
+ uint16_t s16[2];
+ const uint8_t * restrict scales = (const uint8_t *)s16;
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+ uint8_t * restrict a = aux8;
+ for (int l = 0; l < 32; ++l) a[l+ 0] = q4[l] & 0xF;
+ for (int l = 0; l < 32; ++l) a[l+32] = q4[l] >> 4;
+
+ const uint16_t * restrict b = (const uint16_t *)x[i].scales;
+ s16[0] = b[0] & 0x0f0f;
+ s16[1] = (b[0] >> 4) & 0x0f0f;
+
+ sumf -= y[i].d * GGML_FP16_TO_FP32(x[i].d[1]) * (scales[2] * (y[i].bsums[0] + y[i].bsums[1]) + scales[3] * (y[i].bsums[2] + y[i].bsums[3]));
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d[0]);
+
+ for (int j = 0; j < QK_K/32; ++j) {
+ for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
+ q8 += 16; a += 16;
+ for (int l = 0; l < 16; ++l) aux16[l] += q8[l] * a[l];
+ q8 += 16; a += 16;
+ const float dl = d * scales[j];
+ for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[l+8]);
+ }
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+#endif
+
+#if QK_K == 256
+void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q5_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+ static const uint32_t kmask1 = 0x3f3f3f3f;
+ static const uint32_t kmask2 = 0x0f0f0f0f;
+ static const uint32_t kmask3 = 0x03030303;
+
+ uint32_t utmp[4];
+
+#ifdef __ARM_NEON
+
+ const uint8x16_t m4b = vdupq_n_u8(0xf);
+ const uint8x16_t mone = vdupq_n_u8(1);
+ const uint8x16_t mtwo = vdupq_n_u8(2);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t mzero = vdupq_n_s32(0);
+#endif
+
+ ggml_int8x16x4_t q5bytes;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8);
+ const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
+ const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
+ vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
+ int32_t sumi_mins = vaddvq_s32(prod);
+
+ const uint8_t * scales = (const uint8_t *)utmp;
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
+
+ ggml_uint8x16x4_t q5h;
+
+ int32_t sumi = 0;
+
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32;
+ const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
+
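+            // bits 0 and 1 of each qh byte supply the fifth bit for the low and
+            // high nibble halves; qh is then shifted right by 2 to expose the
+            // next pair of sub-blocks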
+ q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
+ q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
+ q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3);
+ q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3);
+ qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2);
+ qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2);
+
+ q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0]));
+ q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1]));
+ q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2]));
+ q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+
+ sumi += vaddvq_s32(vdotq_s32(vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++;
+ sumi += vaddvq_s32(vdotq_s32(vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++;
+#else
+
+ const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q5bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q5bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ sumi += vaddvq_s16(vaddq_s16(p0, p1)) * *scales++;
+
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q5bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q5bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ sumi += vaddvq_s16(vaddq_s16(p2, p3)) * *scales++;
+#endif
+ }
+
+ sumf += d * sumi - dmin * sumi_mins;
+
+ }
+
+ *s = sumf;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+ const __m128i mzero = _mm_setzero_si128();
+ const __m256i mone = _mm256_set1_epi8(1);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0.f;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
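+        // note: this guard is redundant (the enclosing function is only
+        // compiled when QK_K == 256), so the #else branch below is dead code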
+#if QK_K == 256
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+#else
+ // TODO
+ const float d = 0, dmin = 0;
+#endif
+
+ const __m256i mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]));
+
+ const __m256i q8sums = _mm256_loadu_si256((const __m256i*)y[i].bsums);
+ const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
+ const __m128i prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s);
+ const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
+ summs += dmin * _mm_extract_epi32(hsum, 0);
+
+ const __m128i sc128 = _mm256_extracti128_si256(mins_and_scales, 0);
+ const __m256i scales = MM256_SET_M128I(sc128, sc128);
+
+ const __m256i hbits = _mm256_loadu_si256((const __m256i*)x[i].qh);
+ __m256i hmask = mone;
+
+ __m256i sumi = _mm256_setzero_si256();
+
+ int bit = 0;
+
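+        // hmask walks one bit per 32-value sub-block through hbits; each masked
+        // bit is shifted down to position 0 and back up to bit 4 to become the
+        // fifth quant bit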
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const __m256i scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+0));
+ const __m256i scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2*j+1));
+
+ const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5); q5 += 32;
+
+ const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
+ const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
+ const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0);
+ hmask = _mm256_slli_epi16(hmask, 1);
+
+ const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
+ const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), bit++), 4);
+ const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1);
+ hmask = _mm256_slli_epi16(hmask, 1);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+
+ __m256i p16_0 = _mm256_maddubs_epi16(q5_0, q8_0);
+ __m256i p16_1 = _mm256_maddubs_epi16(q5_1, q8_1);
+
+ p16_0 = _mm256_madd_epi16(scale_0, p16_0);
+ p16_1 = _mm256_madd_epi16(scale_1, p16_1);
+
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
+
+ }
+
+ __m256 vd = _mm256_set1_ps(d);
+ acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc);
+
+ }
+
+ *s = hsum_float_8(acc) + summs;
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i mzero = _mm_setzero_si128();
+ const __m128i mone = _mm_set1_epi8(1);
+ const __m128i m2 = _mm_set1_epi8(2);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ float summs = 0.f;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
+ const __m128i scales = _mm_cvtepu8_epi16(utmps);
+ const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));
+
+ const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
+ const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
+ const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
+ const __m128i prod = _mm_madd_epi16(mins, q8s);
+ const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
+ summs += dmin * _mm_extract_epi32(hsum, 0);
+
+ const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]);
+ const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]);
+ __m128i hmask = mone;
+
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
+ int bit = 0;
+
+ __m128i shuffle = _mm_set1_epi16(0x0100);
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi16(shuffle, m2);
+ const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi16(shuffle, m2);
+
+ const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
+ const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
+
+ __m128i q5l_0 = _mm_and_si128(q5bits_0, m4);
+ __m128i q5l_1 = _mm_and_si128(q5bits_1, m4);
+ __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
+ __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
+ __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0);
+ __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1);
+ hmask = _mm_slli_epi16(hmask, 1);
+
+ __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0);
+ __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1);
+ p16_0 = _mm_madd_epi16(scale_0, p16_0);
+ p16_1 = _mm_madd_epi16(scale_0, p16_1);
+
+ q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4);
+ q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4);
+ q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
+ q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
+ q5_0 = _mm_add_epi8(q5l_0, q5h_0);
+ q5_1 = _mm_add_epi8(q5l_1, q5h_1);
+ hmask = _mm_slli_epi16(hmask, 1);
+
+ q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0);
+ __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1);
+ p16_2 = _mm_madd_epi16(scale_1, p16_2);
+ p16_3 = _mm_madd_epi16(scale_1, p16_3);
+
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
+
+ }
+
+ __m256 vd = _mm256_set1_ps(d);
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+ acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);
+
+ }
+
+ *s = hsum_float_8(acc) + summs;
+
+#elif defined __riscv_v_intrinsic
+
+ const uint8_t * scales = (const uint8_t*)&utmp[0];
+ const uint8_t * mins = (const uint8_t*)&utmp[2];
+
+ float sumf = 0;
+ float sums = 0.0;
+
+ size_t vl;
+
+ for (int i = 0; i < nb; ++i) {
+
+ vl = 8;
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const uint8_t * restrict hm = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
+
+ vint16mf2_t q8sums_0 = __riscv_vlse16_v_i16mf2(y[i].bsums, 4, vl);
+ vint16mf2_t q8sums_1 = __riscv_vlse16_v_i16mf2(y[i].bsums+1, 4, vl);
+ vint16mf2_t q8sums = __riscv_vadd_vv_i16mf2(q8sums_0, q8sums_1, vl);
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ vuint8mf4_t mins8 = __riscv_vle8_v_u8mf4(mins, vl);
+ vint16mf2_t v_mins = __riscv_vreinterpret_v_u16mf2_i16mf2(__riscv_vzext_vf2_u16mf2(mins8, vl));
+ vint32m1_t prod = __riscv_vwmul_vv_i32m1(q8sums, v_mins, vl);
+
+ vint32m1_t sumi = __riscv_vredsum_vs_i32m1_i32m1(prod, __riscv_vmv_v_x_i32m1(0, 1), vl);
+ sumf -= dmin * __riscv_vmv_x_s_i32m1_i32(sumi);
+
+ vl = 32;
+ int32_t aux32 = 0;
+ int is = 0;
+
+ uint8_t m = 1;
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+ vuint8m1_t vqh = __riscv_vle8_v_u8m1(hm, vl);
+
+ for (int j = 0; j < QK_K/64; ++j) {
+ // load Q5 and Q8
+ vuint8m1_t q5_x = __riscv_vle8_v_u8m1(q5, vl);
+ vint8m1_t q8_y1 = __riscv_vle8_v_i8m1(q8, vl);
+ vint8m1_t q8_y2 = __riscv_vle8_v_i8m1(q8+32, vl);
+
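+            // reconstruct the full 5-bit values: lanes whose qh bit is set
+            // receive +16 through the masked adds below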
+ // compute mask for addition
+ vint8m1_t q5_a = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vand_vx_u8m1(q5_x, 0x0F, vl));
+ vuint8m1_t qh_m1 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_1 = __riscv_vmsne_vx_u8m1_b8(qh_m1, 0, vl);
+ vint8m1_t q5_m1 = __riscv_vadd_vx_i8m1_m(vmask_1, q5_a, 16, vl);
+ m <<= 1;
+
+ vint8m1_t q5_l = __riscv_vreinterpret_v_u8m1_i8m1(__riscv_vsrl_vx_u8m1(q5_x, 0x04, vl));
+ vuint8m1_t qh_m2 = __riscv_vand_vx_u8m1(vqh, m, vl);
+ vbool8_t vmask_2 = __riscv_vmsne_vx_u8m1_b8(qh_m2, 0, vl);
+ vint8m1_t q5_m2 = __riscv_vadd_vx_i8m1_m(vmask_2, q5_l, 16, vl);
+ m <<= 1;
+
+ vint16m2_t v0 = __riscv_vwmul_vv_i16m2(q5_m1, q8_y1, vl);
+ vint16m2_t v1 = __riscv_vwmul_vv_i16m2(q5_m2, q8_y2, vl);
+
+ vint32m4_t vs1 = __riscv_vwmul_vx_i32m4(v0, scales[is++], vl);
+ vint32m4_t vs2 = __riscv_vwmul_vx_i32m4(v1, scales[is++], vl);
+
+ vint32m1_t vacc1 = __riscv_vredsum_vs_i32m4_i32m1(vs1, vzero, vl);
+ vint32m1_t vacc2 = __riscv_vredsum_vs_i32m4_i32m1(vs2, vzero, vl);
+
+ aux32 += __riscv_vmv_x_s_i32m1_i32(vacc1) + __riscv_vmv_x_s_i32m1_i32(vacc2);
+ q5 += 32; q8 += 64;
+
+ }
+
+ vfloat32m1_t vaux = __riscv_vfmul_vf_f32m1(__riscv_vfmv_v_f_f32m1(aux32, 1), d, 1);
+ sums += __riscv_vfmv_f_s_f32m1_f32(vaux);
+
+ }
+
+ *s = sumf+sums;
+
+#else
+
+ const uint8_t * scales = (const uint8_t*)&utmp[0];
+ const uint8_t * mins = (const uint8_t*)&utmp[2];
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].qs;
+ const uint8_t * restrict hm = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+ memset(aux32, 0, 8*sizeof(int32_t));
+ int8_t * restrict a = aux8;
+ uint8_t m = 1;
+ for (int j = 0; j < QK_K/64; ++j) {
+ for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] & 0xF);
+ for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
+ a += 32; m <<= 1;
+ for (int l = 0; l < 32; ++l) a[l] = (int8_t)(q4[l] >> 4);
+ for (int l = 0; l < 32; ++l) a[l] += (hm[l] & m ? 16 : 0);
+ a += 32; m <<= 1;
+ q4 += 32;
+ }
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ int sumi = 0;
+ for (int j = 0; j < QK_K/16; ++j) sumi += y[i].bsums[j] * mins[j/2];
+ a = aux8;
+ int is = 0;
+ for (int j = 0; j < QK_K/32; ++j) {
+ int32_t scale = scales[is++];
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ const float dmin = GGML_FP16_TO_FP32(x[i].dmin) * y[i].d;
+ sumf -= dmin * sumi;
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+
+#else
+
+void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q5_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_NEON
+
+ const uint8x16_t m4b = vdupq_n_u8(0xf);
+ const uint8x16_t mh = vdupq_n_u8(16);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t mzero = vdupq_n_s32(0);
+#endif
+
+ ggml_int8x16x4_t q5bytes;
+ ggml_uint8x16x4_t q5h;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * (float)x[i].d;
+ const int8_t * sc = x[i].scales;
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const uint8x8_t qhbits = vld1_u8(qh);
+
+ const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5);
+ const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
+
+ const uint8x16_t htmp = vcombine_u8(qhbits, vshr_n_u8(qhbits, 1));
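+        // vbic keeps 16 only where the high bit is clear, so the subtractions
+        // below map each quant to (q5 | hbit << 4) - 16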
+ q5h.val[0] = vbicq_u8(mh, vshlq_n_u8(htmp, 4));
+ q5h.val[1] = vbicq_u8(mh, vshlq_n_u8(htmp, 2));
+ q5h.val[2] = vbicq_u8(mh, htmp);
+ q5h.val[3] = vbicq_u8(mh, vshrq_n_u8(htmp, 2));
+
+ q5bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[0], m4b)), vreinterpretq_s8_u8(q5h.val[0]));
+ q5bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q5bits.val[1], m4b)), vreinterpretq_s8_u8(q5h.val[1]));
+ q5bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[0], 4)), vreinterpretq_s8_u8(q5h.val[2]));
+ q5bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vshrq_n_u8(q5bits.val[1], 4)), vreinterpretq_s8_u8(q5h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+
+ int32_t sumi1 = sc[0] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]));
+ int32_t sumi2 = sc[1] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[1], q8bytes.val[1]));
+ int32_t sumi3 = sc[2] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]));
+ int32_t sumi4 = sc[3] * vaddvq_s32(vdotq_s32(mzero, q5bytes.val[3], q8bytes.val[3]));
+
+ sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
+
+#else
+
+ const int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q5bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ const int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q5bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ int32_t sumi = sc[0] * vaddvq_s16(p0) + sc[1] * vaddvq_s16(p1);
+
+ const int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q5bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ const int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q5bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q5bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ sumi += sc[2] * vaddvq_s16(p2) + sc[3] * vaddvq_s16(p3);
+
+ sumf += d*sumi;
+#endif
+
+ }
+
+ *s = sumf;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+ const __m256i mone = _mm256_set1_epi8(1);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
+
+ const __m256i scale_l = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[1]), _mm_set1_epi16(x[i].scales[0]));
+ const __m256i scale_h = MM256_SET_M128I(_mm_set1_epi16(x[i].scales[3]), _mm_set1_epi16(x[i].scales[2]));
+
+ int64_t aux64;
+ memcpy(&aux64, x[i].qh, 8);
+ const __m128i haux128 = _mm_set_epi64x(aux64 >> 1, aux64);
+ const __m256i haux256 = MM256_SET_M128I(_mm_srli_epi16(haux128, 2), haux128);
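+        // aux64 is duplicated with >>1 and >>2 shifts so that one bit of every
+        // qh byte lines up with each 8-value group; andnot with mone then
+        // yields 16 where that bit is clear, reproducing the -16 offset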
+
+ const __m256i q5h_0 = _mm256_slli_epi16(_mm256_andnot_si256(haux256, mone), 4);
+ const __m256i q5h_1 = _mm256_slli_epi16(_mm256_andnot_si256(_mm256_srli_epi16(haux256, 4), mone), 4);
+
+ const __m256i q5l_0 = _mm256_and_si256(q5bits, m4);
+ const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m256i p16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5l_0, q8_0));
+ const __m256i p16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5l_1, q8_1));
+ const __m256i s16_0 = _mm256_madd_epi16(scale_l, _mm256_maddubs_epi16(q5h_0, q8_0));
+ const __m256i s16_1 = _mm256_madd_epi16(scale_h, _mm256_maddubs_epi16(q5h_1, q8_1));
+
+ const __m256i dot = _mm256_sub_epi32(_mm256_add_epi32(p16_0, p16_1), _mm256_add_epi32(s16_0, s16_1));
+
+ acc = _mm256_fmadd_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(dot), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i mone = _mm_set1_epi8(1);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const __m256i q5bits = _mm256_loadu_si256((const __m256i*)q5);
+
+ const __m128i scale_0 = _mm_set1_epi16(x[i].scales[0]);
+ const __m128i scale_1 = _mm_set1_epi16(x[i].scales[1]);
+ const __m128i scale_2 = _mm_set1_epi16(x[i].scales[2]);
+ const __m128i scale_3 = _mm_set1_epi16(x[i].scales[3]);
+
+ int64_t aux64;
+ memcpy(&aux64, x[i].qh, 8);
+ const __m128i haux128_0 = _mm_set_epi64x(aux64 >> 1, aux64);
+ const __m128i haux128_1 = _mm_srli_epi16(haux128_0, 2);
+
+ const __m128i q5h_0 = _mm_slli_epi16(_mm_andnot_si128(haux128_0, mone), 4);
+ const __m128i q5h_1 = _mm_slli_epi16(_mm_andnot_si128(haux128_1, mone), 4);
+ const __m128i q5h_2 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_0, 4), mone), 4);
+ const __m128i q5h_3 = _mm_slli_epi16(_mm_andnot_si128(_mm_srli_epi16(haux128_1, 4), mone), 4);
+
+ const __m128i q5l_0 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 0), m4);
+ const __m128i q5l_1 = _mm_and_si128(_mm256_extractf128_si256(q5bits, 1), m4);
+ const __m128i q5l_2 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 0), 4), m4);
+ const __m128i q5l_3 = _mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q5bits, 1), 4), m4);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ const __m128i p16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5l_0, _mm256_extractf128_si256(q8_0, 0)));
+ const __m128i p16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5l_1, _mm256_extractf128_si256(q8_0, 1)));
+ const __m128i p16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5l_2, _mm256_extractf128_si256(q8_1, 0)));
+ const __m128i p16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5l_3, _mm256_extractf128_si256(q8_1, 1)));
+ const __m128i s16_0 = _mm_madd_epi16(scale_0, _mm_maddubs_epi16(q5h_0, _mm256_extractf128_si256(q8_0, 0)));
+ const __m128i s16_1 = _mm_madd_epi16(scale_1, _mm_maddubs_epi16(q5h_1, _mm256_extractf128_si256(q8_0, 1)));
+ const __m128i s16_2 = _mm_madd_epi16(scale_2, _mm_maddubs_epi16(q5h_2, _mm256_extractf128_si256(q8_1, 0)));
+ const __m128i s16_3 = _mm_madd_epi16(scale_3, _mm_maddubs_epi16(q5h_3, _mm256_extractf128_si256(q8_1, 1)));
+
+ const __m128i dot_0 = _mm_sub_epi32(_mm_add_epi32(p16_0, p16_2), _mm_add_epi32(s16_0, s16_2));
+ const __m128i dot_1 = _mm_sub_epi32(_mm_add_epi32(p16_1, p16_3), _mm_add_epi32(s16_1, s16_3));
+
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(MM256_SET_M128I(dot_1, dot_0))), acc);
+
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * (float)x[i].d;
+ const int8_t * sc = x[i].scales;
+
+ const uint8_t * restrict q5 = x[i].qs;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+
+ // load qh
+ vuint8mf4_t qh_x1 = __riscv_vle8_v_u8mf4(qh, 8);
+ vuint8mf2_t qh_x2 = __riscv_vlmul_ext_v_u8mf4_u8mf2(__riscv_vsrl_vx_u8mf4(qh_x1, 1, 8));
+
+ size_t vl = 16;
+
+        // combine qh_x1 and qh_x2 into a single 16-lane vector
+ vuint8mf2_t qh_x = __riscv_vslideup_vx_u8mf2(__riscv_vlmul_ext_v_u8mf4_u8mf2(qh_x1), qh_x2, vl/2, vl);
+
+ vuint8mf2_t qh_h0 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
+ vuint8mf2_t qh_h1 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsll_vx_u8mf2(qh_x, 0x2, vl), vl), 16, vl);
+ vuint8mf2_t qh_h2 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(qh_x, vl), 16, vl);
+ vuint8mf2_t qh_h3 = __riscv_vand_vx_u8mf2(__riscv_vnot_v_u8mf2(__riscv_vsrl_vx_u8mf2(qh_x, 0x4, vl), vl), 16, vl);
+
+ vint8mf2_t qh_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h0);
+ vint8mf2_t qh_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h1);
+ vint8mf2_t qh_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h2);
+ vint8mf2_t qh_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(qh_h3);
+
+ // load q5
+ vuint8mf2_t q5_x1 = __riscv_vle8_v_u8mf2(q5, vl);
+ vuint8mf2_t q5_x2 = __riscv_vle8_v_u8mf2(q5+16, vl);
+
+ vint8mf2_t q5s_0 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x1, 0xF, vl));
+ vint8mf2_t q5s_1 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vand_vx_u8mf2(q5_x2, 0xF, vl));
+ vint8mf2_t q5s_2 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x1, 0x4, vl));
+ vint8mf2_t q5s_3 = __riscv_vreinterpret_v_u8mf2_i8mf2(__riscv_vsrl_vx_u8mf2(q5_x2, 0x4, vl));
+
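+        // qh_0..qh_3 hold 16 exactly where the high bit is clear, so these
+        // subtractions apply the same (q5 | hbit << 4) - 16 mapping as the
+        // NEON path above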
+ vint8mf2_t q5_0 = __riscv_vsub_vv_i8mf2(q5s_0, qh_0, vl);
+ vint8mf2_t q5_1 = __riscv_vsub_vv_i8mf2(q5s_1, qh_1, vl);
+ vint8mf2_t q5_2 = __riscv_vsub_vv_i8mf2(q5s_2, qh_2, vl);
+ vint8mf2_t q5_3 = __riscv_vsub_vv_i8mf2(q5s_3, qh_3, vl);
+
+ // load Q8 and multiply it with Q5
+ vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q5_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
+ vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q5_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
+ vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q5_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
+ vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q5_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
+
+ vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
+ vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
+ vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
+ vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
+
+ int32_t sumi1 = sc[0] * __riscv_vmv_x_s_i32m1_i32(vs_0);
+ int32_t sumi2 = sc[1] * __riscv_vmv_x_s_i32m1_i32(vs_1);
+ int32_t sumi3 = sc[2] * __riscv_vmv_x_s_i32m1_i32(vs_2);
+ int32_t sumi4 = sc[3] * __riscv_vmv_x_s_i32m1_i32(vs_3);
+
+ sumf += d * (sumi1 + sumi2 + sumi3 + sumi4);
+
+ }
+
+ *s = sumf;
+
+#else
+
+ int8_t aux8[QK_K];
+ int16_t aux16[16];
+ float sums [8];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].qs;
+ const uint8_t * restrict hm = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+ int8_t * restrict a = aux8;
+ for (int l = 0; l < 32; ++l) {
+ a[l+ 0] = q4[l] & 0xF;
+ a[l+32] = q4[l] >> 4;
+ }
+ for (int is = 0; is < 8; ++is) {
+ uint8_t m = 1 << is;
+ for (int l = 0; l < 8; ++l) a[8*is + l] -= (hm[l] & m ? 0 : 16);
+ }
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const int8_t * restrict sc = x[i].scales;
+
+ for (int j = 0; j < QK_K/16; ++j) {
+ const float dl = d * sc[j];
+ for (int l = 0; l < 16; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) sums[l] += dl * (aux16[l] + aux16[8+l]);
+ q8 += 16; a += 16;
+ }
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+#endif
+
+#if QK_K == 256
+void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q6_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_NEON
+
+ float sum = 0;
+
+ const uint8x16_t m4b = vdupq_n_u8(0xF);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+ //const int8x16_t m32s = vdupq_n_s8(32);
+
+ const uint8x16_t mone = vdupq_n_u8(3);
+
+ ggml_int8x16x4_t q6bytes;
+ ggml_uint8x16x4_t q6h;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d_all = GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q6 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const int8_t * restrict scale = x[i].scales;
+
+ const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
+ const int8x16_t scales = vld1q_s8(scale);
+ const ggml_int16x8x2_t q6scales = {vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))};
+
+ const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])),
+ vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))),
+ vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])),
+ vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1]))));
+ int32_t isum_mins = vaddvq_s32(prod);
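+        // q6_K values are (q - 32) * scale; instead of subtracting 32 from every
+        // element (see the commented-out m32s variant), the bias is folded into
+        // this scale*bsums dot product and removed once per super-block below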
+
+ int32_t isum = 0;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32;
+ ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64;
+ ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
+
+ q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
+ q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
+ uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2);
+ q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[1], 2);
+ q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+
+ //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
+ //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
+ //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s);
+ //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s);
+ q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0]));
+ q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1]));
+ q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2]));
+ q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+
+ isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
+ scale += 4;
+
+#else
+
+ int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1];
+ scale += 2;
+
+ int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1];
+ scale += 2;
+#endif
+
+ q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
+
+ shifted = vshrq_n_u8(qhbits.val[0], 4);
+ q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[1], 4);
+ q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[0], 6);
+ q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[1], 6);
+ q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+
+ //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s);
+ //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s);
+ //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s);
+ //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s);
+ q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0]));
+ q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1]));
+ q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2]));
+ q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3]));
+
+#if defined(__ARM_FEATURE_DOTPROD)
+
+ isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
+ scale += 4;
+
+ //for (int l = 0; l < 4; ++l) {
+ // const int32x4_t p = vdotq_s32(vzero, q6bytes.val[l], q8bytes.val[l]);
+ // isum += vaddvq_s32(p) * *scale++;
+ //}
+#else
+ p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1];
+ scale += 2;
+
+ p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ isum += vaddvq_s16(p2) * scale[0] + vaddvq_s16(p3) * scale[1];
+ scale += 2;
+#endif
+
+ }
+ //sum += isum * d_all * y[i].d;
+ sum += d_all * y[i].d * (isum - 32 * isum_mins);
+
+ }
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+ const __m256i m2 = _mm256_set1_epi8(3);
+ const __m256i m32s = _mm256_set1_epi8(32);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
+
+ __m256i sumi = _mm256_setzero_si256();
+
+ int is = 0;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const __m128i scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 0));
+ const __m128i scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1));
+ const __m128i scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2));
+ const __m128i scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3));
+ is += 4;
+
+ const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
+ const __m256i q4bits2 = _mm256_loadu_si256((const __m256i*)q4); q4 += 32;
+ const __m256i q4bitsH = _mm256_loadu_si256((const __m256i*)qh); qh += 32;
+
+ const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bitsH, m2), 4);
+ const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 2), m2), 4);
+ const __m256i q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 4), m2), 4);
+ const __m256i q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bitsH, 6), m2), 4);
+
+ const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
+ const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1);
+ const __m256i q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2);
+ const __m256i q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_2 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+ const __m256i q8_3 = _mm256_loadu_si256((const __m256i*)q8); q8 += 32;
+
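+ // q8s_* holds 32*q8 pairwise (maddubs with the unsigned constant 32); subtracting
+ // it from p16_* below applies the q6_K bias via (q-32)*q8 == q*q8 - 32*q8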
+ __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
+ __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
+ __m256i q8s_2 = _mm256_maddubs_epi16(m32s, q8_2);
+ __m256i q8s_3 = _mm256_maddubs_epi16(m32s, q8_3);
+
+ __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
+ __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
+ __m256i p16_2 = _mm256_maddubs_epi16(q4_2, q8_2);
+ __m256i p16_3 = _mm256_maddubs_epi16(q4_3, q8_3);
+
+ p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm256_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm256_sub_epi16(p16_3, q8s_3);
+
+ p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
+ p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
+ p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2);
+ p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3);
+
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3));
+
+ }
+
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i m3 = _mm_set1_epi8(3);
+ const __m128i m32s = _mm_set1_epi8(32);
+ const __m128i m2 = _mm_set1_epi8(2);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);
+
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
+ __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
+ const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
+
+ const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
+ const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
+ const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
+ const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
+ const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
+ const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
+ const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
+ const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);
+
+ const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+ const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+ const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+ const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
+
+ const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
+ const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
+ const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
+ const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
+ const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
+ const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
+ const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
+ const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);
+
+ const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+ const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
+
+ __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
+ __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
+ __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
+ __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
+ __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
+ __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
+ __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
+ __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);
+
+ __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
+ __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
+ __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
+ __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3);
+ __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4);
+ __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5);
+ __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
+ __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);
+
+ p16_0 = _mm_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm_sub_epi16(p16_3, q8s_3);
+ p16_4 = _mm_sub_epi16(p16_4, q8s_4);
+ p16_5 = _mm_sub_epi16(p16_5, q8s_5);
+ p16_6 = _mm_sub_epi16(p16_6, q8s_6);
+ p16_7 = _mm_sub_epi16(p16_7, q8s_7);
+
+ const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi8(shuffle, m2);
+ const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi8(shuffle, m2);
+ const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi8(shuffle, m2);
+ const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
+ shuffle = _mm_add_epi8(shuffle, m2);
+
+ p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
+ p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
+ p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
+ p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
+ p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
+ p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
+ p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
+ p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);
+
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7));
+
+ }
+
+ __m256i sumi = MM256_SET_M128I(sumi_1, sumi_0);
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+
+ const uint8_t * restrict q6 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const int8_t * restrict scale = x[i].scales;
+
+ size_t vl;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+
+ int sum_t = 0;
+ int is = 0;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ vl = 32;
+
+ // load qh
+ vuint8m1_t qh_x = __riscv_vle8_v_u8m1(qh, vl);
+
+ // load Q6
+ vuint8m1_t q6_0 = __riscv_vle8_v_u8m1(q6, vl);
+ vuint8m1_t q6_1 = __riscv_vle8_v_u8m1(q6+32, vl);
+
+ vuint8m1_t q6a_0 = __riscv_vand_vx_u8m1(q6_0, 0x0F, vl);
+ vuint8m1_t q6a_1 = __riscv_vand_vx_u8m1(q6_1, 0x0F, vl);
+ vuint8m1_t q6s_0 = __riscv_vsrl_vx_u8m1(q6_0, 0x04, vl);
+ vuint8m1_t q6s_1 = __riscv_vsrl_vx_u8m1(q6_1, 0x04, vl);
+
+ vuint8m1_t qh_0 = __riscv_vand_vx_u8m1(qh_x, 0x03, vl);
+ vuint8m1_t qh_1 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x2, vl), 0x03 , vl);
+ vuint8m1_t qh_2 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x4, vl), 0x03 , vl);
+ vuint8m1_t qh_3 = __riscv_vand_vx_u8m1(__riscv_vsrl_vx_u8m1(qh_x, 0x6, vl), 0x03 , vl);
+
+ vuint8m1_t qhi_0 = __riscv_vor_vv_u8m1(q6a_0, __riscv_vsll_vx_u8m1(qh_0, 0x04, vl), vl);
+ vuint8m1_t qhi_1 = __riscv_vor_vv_u8m1(q6a_1, __riscv_vsll_vx_u8m1(qh_1, 0x04, vl), vl);
+ vuint8m1_t qhi_2 = __riscv_vor_vv_u8m1(q6s_0, __riscv_vsll_vx_u8m1(qh_2, 0x04, vl), vl);
+ vuint8m1_t qhi_3 = __riscv_vor_vv_u8m1(q6s_1, __riscv_vsll_vx_u8m1(qh_3, 0x04, vl), vl);
+
+ vint8m1_t a_0 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_0), 32, vl);
+ vint8m1_t a_1 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_1), 32, vl);
+ vint8m1_t a_2 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_2), 32, vl);
+ vint8m1_t a_3 = __riscv_vsub_vx_i8m1(__riscv_vreinterpret_v_u8m1_i8m1(qhi_3), 32, vl);
+
+ // load Q8 and take product
+ vint16m2_t va_q_0 = __riscv_vwmul_vv_i16m2(a_0, __riscv_vle8_v_i8m1(q8, vl), vl);
+ vint16m2_t va_q_1 = __riscv_vwmul_vv_i16m2(a_1, __riscv_vle8_v_i8m1(q8+32, vl), vl);
+ vint16m2_t va_q_2 = __riscv_vwmul_vv_i16m2(a_2, __riscv_vle8_v_i8m1(q8+64, vl), vl);
+ vint16m2_t va_q_3 = __riscv_vwmul_vv_i16m2(a_3, __riscv_vle8_v_i8m1(q8+96, vl), vl);
+
+ vl = 16;
+
+ vint32m2_t vaux_0 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 0), scale[is+0], vl);
+ vint32m2_t vaux_1 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_0, 1), scale[is+1], vl);
+ vint32m2_t vaux_2 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 0), scale[is+2], vl);
+ vint32m2_t vaux_3 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_1, 1), scale[is+3], vl);
+ vint32m2_t vaux_4 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 0), scale[is+4], vl);
+ vint32m2_t vaux_5 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_2, 1), scale[is+5], vl);
+ vint32m2_t vaux_6 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 0), scale[is+6], vl);
+ vint32m2_t vaux_7 = __riscv_vwmul_vx_i32m2(__riscv_vget_v_i16m2_i16m1(va_q_3, 1), scale[is+7], vl);
+
+ vint32m1_t isum0 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_0, vaux_1, vl), vzero, vl);
+ vint32m1_t isum1 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_2, vaux_3, vl), isum0, vl);
+ vint32m1_t isum2 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_4, vaux_5, vl), isum1, vl);
+ vint32m1_t isum3 = __riscv_vredsum_vs_i32m2_i32m1(__riscv_vadd_vv_i32m2(vaux_6, vaux_7, vl), isum2, vl);
+
+ sum_t += __riscv_vmv_x_s_i32m1_i32(isum3);
+
+ q6 += 64; qh += 32; q8 += 128; is = 8; // second (and final) pass reads scales[8..15]
+
+ }
+
+ sumf += d * sum_t;
+
+ }
+
+ *s = sumf;
+
+#else
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+ memset(aux32, 0, 8*sizeof(int32_t));
+ int8_t * restrict a = aux8;
+ for (int j = 0; j < QK_K; j += 128) {
+ for (int l = 0; l < 32; ++l) {
+ a[l + 0] = (int8_t)((q4[l + 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
+ a[l + 32] = (int8_t)((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
+ a[l + 64] = (int8_t)((q4[l + 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
+ a[l + 96] = (int8_t)((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
+ }
+ a += 128;
+ q4 += 64;
+ qh += 32;
+ }
+ a = aux8;
+ int is = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int scale = x[i].scales[is++];
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+
+#else
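+// GGML_QKK_64 build: QK_K == 64, so each super-block is handled in a single
+// 64-element pass (no 128-wide inner loop) with only 4 scales per block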
+
+void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
+ assert(n % QK_K == 0);
+
+ const block_q6_K * restrict x = vx;
+ const block_q8_K * restrict y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_NEON
+
+ float sum = 0;
+
+ const uint8x16_t m4b = vdupq_n_u8(0xF);
+ const int8x16_t m32s = vdupq_n_s8(32);
+#if defined(__ARM_FEATURE_DOTPROD)
+ const int32x4_t vzero = vdupq_n_s32(0);
+#endif
+
+ const uint8x16_t mone = vdupq_n_u8(3);
+
+ ggml_int8x16x4_t q6bytes;
+ ggml_uint8x16x4_t q6h;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d_all = (float)x[i].d;
+
+ const uint8_t * restrict q6 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const int8_t * restrict scale = x[i].scales;
+
+ int32_t isum = 0;
+
+ uint8x16_t qhbits = vld1q_u8(qh);
+ ggml_uint8x16x2_t q6bits = ggml_vld1q_u8_x2(q6);
+ ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8);
+
+ q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits), 4);
+ uint8x16_t shifted = vshrq_n_u8(qhbits, 2);
+ q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits, 4);
+ q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits, 6);
+ q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+
+ q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
+ q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
+ q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[2])), m32s);
+ q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[3])), m32s);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+
+ isum += vaddvq_s32(vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
+ vaddvq_s32(vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
+#else
+
+ int16x8_t p0 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[0]), vget_low_s8 (q8bytes.val[0])),
+ vmull_s8(vget_high_s8(q6bytes.val[0]), vget_high_s8(q8bytes.val[0])));
+ int16x8_t p1 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[1]), vget_low_s8 (q8bytes.val[1])),
+ vmull_s8(vget_high_s8(q6bytes.val[1]), vget_high_s8(q8bytes.val[1])));
+ isum += vaddvq_s16(p0) * scale[0] + vaddvq_s16(p1) * scale[1];
+
+ int16x8_t p2 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[2]), vget_low_s8 (q8bytes.val[2])),
+ vmull_s8(vget_high_s8(q6bytes.val[2]), vget_high_s8(q8bytes.val[2])));
+ int16x8_t p3 = vaddq_s16(vmull_s8(vget_low_s8 (q6bytes.val[3]), vget_low_s8 (q8bytes.val[3])),
+ vmull_s8(vget_high_s8(q6bytes.val[3]), vget_high_s8(q8bytes.val[3])));
+ isum += vaddvq_s16(p2) * scale[2] + vaddvq_s16(p3) * scale[3];
+#endif
+
+ sum += isum * d_all * y[i].d;
+
+ }
+ *s = sum;
+
+#elif defined __AVX2__
+
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+ const __m256i m2 = _mm256_set1_epi8(3);
+ const __m256i m32s = _mm256_set1_epi8(32);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
+ const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
+ const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
+ const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
+
+ __m256i sumi = _mm256_setzero_si256();
+
+ const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
+ const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
+
+ const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
+ const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
+
+ const __m256i q4h_0 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 2), q4bitsH), m2), 4);
+ const __m256i q4h_1 = _mm256_slli_epi16(_mm256_and_si256(MM256_SET_M128I(_mm_srli_epi16(q4bitsH, 6), _mm_srli_epi16(q4bitsH, 4)), m2), 4);
+
+ const __m256i q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0);
+ const __m256i q4_1 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_1);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ __m256i q8s_0 = _mm256_maddubs_epi16(m32s, q8_0);
+ __m256i q8s_1 = _mm256_maddubs_epi16(m32s, q8_1);
+
+ __m256i p16_0 = _mm256_maddubs_epi16(q4_0, q8_0);
+ __m256i p16_1 = _mm256_maddubs_epi16(q4_1, q8_1);
+
+ p16_0 = _mm256_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm256_sub_epi16(p16_1, q8s_1);
+
+ p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0);
+ p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1);
+
+ sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1));
+
+ acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc);
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __AVX__
+
+ const __m128i m4 = _mm_set1_epi8(0xF);
+ const __m128i m2 = _mm_set1_epi8(3);
+ const __m128i m32s = _mm_set1_epi8(32);
+
+ __m256 acc = _mm256_setzero_ps();
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const __m64 scales_1 = _mm_set1_pi8(x[i].scales[0]);
+ const __m64 scales_2 = _mm_set1_pi8(x[i].scales[1]);
+ const __m64 scales_3 = _mm_set1_pi8(x[i].scales[2]);
+ const __m64 scales_4 = _mm_set1_pi8(x[i].scales[3]);
+
+ __m128i sumi_0 = _mm_setzero_si128();
+ __m128i sumi_1 = _mm_setzero_si128();
+
+ const __m128i scale_0 = _mm_set_epi64(scales_2, scales_1);
+ const __m128i scale_1 = _mm_set_epi64(scales_4, scales_3);
+
+ const __m256i q4bits1 = _mm256_loadu_si256((const __m256i*)q4);
+ const __m128i q4bitsH = _mm_loadu_si128((const __m128i*)qh);
+
+ const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH, m2), 4);
+ const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 2), m2), 4);
+ const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 4), m2), 4);
+ const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH, 6), m2), 4);
+
+ const __m128i q4_0 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 0), m4), q4h_0);
+ const __m128i q4_1 = _mm_or_si128(_mm_and_si128(_mm256_extractf128_si256(q4bits1, 1), m4), q4h_1);
+ const __m128i q4_2 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 0), 4), m4), q4h_2);
+ const __m128i q4_3 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(_mm256_extractf128_si256(q4bits1, 1), 4), m4), q4h_3);
+
+ const __m256i q8_0 = _mm256_loadu_si256((const __m256i*)(q8+ 0));
+ const __m256i q8_1 = _mm256_loadu_si256((const __m256i*)(q8+32));
+
+ __m128i q8s_0 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 0));
+ __m128i q8s_1 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_0, 1));
+ __m128i q8s_2 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 0));
+ __m128i q8s_3 = _mm_maddubs_epi16(m32s, _mm256_extractf128_si256(q8_1, 1));
+
+ __m128i p16_0 = _mm_maddubs_epi16(q4_0, _mm256_extractf128_si256(q8_0, 0));
+ __m128i p16_1 = _mm_maddubs_epi16(q4_1, _mm256_extractf128_si256(q8_0, 1));
+ __m128i p16_2 = _mm_maddubs_epi16(q4_2, _mm256_extractf128_si256(q8_1, 0));
+ __m128i p16_3 = _mm_maddubs_epi16(q4_3, _mm256_extractf128_si256(q8_1, 1));
+
+ p16_0 = _mm_sub_epi16(p16_0, q8s_0);
+ p16_1 = _mm_sub_epi16(p16_1, q8s_1);
+ p16_2 = _mm_sub_epi16(p16_2, q8s_2);
+ p16_3 = _mm_sub_epi16(p16_3, q8s_3);
+
+ p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
+ p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
+ p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
+ p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
+
+ sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
+ sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
+
+ acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(MM256_SET_M128I(sumi_1, sumi_0))), acc);
+ }
+
+ *s = hsum_float_8(acc);
+
+#elif defined __riscv_v_intrinsic
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d_all = (float)x[i].d;
+
+ const uint8_t * restrict q6 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+
+ const int8_t * restrict scale = x[i].scales;
+
+ int32_t isum = 0;
+
+ size_t vl = 16;
+
+ vint32m1_t vzero = __riscv_vmv_v_x_i32m1(0, 1);
+
+ // load Q6
+ vuint8mf2_t q6_0 = __riscv_vle8_v_u8mf2(q6, vl);
+ vuint8mf2_t q6_1 = __riscv_vle8_v_u8mf2(q6+16, vl);
+
+ // load qh
+ vuint8mf2_t qh_x = __riscv_vle8_v_u8mf2(qh, vl);
+
+ vuint8mf2_t qh0 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
+ qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
+ vuint8mf2_t qh1 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
+ qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
+ vuint8mf2_t qh2 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
+ qh_x = __riscv_vsrl_vx_u8mf2(qh_x, 0x2, vl);
+ vuint8mf2_t qh3 = __riscv_vsll_vx_u8mf2(__riscv_vand_vx_u8mf2(qh_x, 0x3, vl), 0x4, vl);
+
+ vuint8mf2_t q6h_0 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_0, 0xF, vl), qh0, vl);
+ vuint8mf2_t q6h_1 = __riscv_vor_vv_u8mf2(__riscv_vand_vx_u8mf2(q6_1, 0xF, vl), qh1, vl);
+ vuint8mf2_t q6h_2 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_0, 0x4, vl), qh2, vl);
+ vuint8mf2_t q6h_3 = __riscv_vor_vv_u8mf2(__riscv_vsrl_vx_u8mf2(q6_1, 0x4, vl), qh3, vl);
+
+ vint8mf2_t q6v_0 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_0), 32, vl);
+ vint8mf2_t q6v_1 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_1), 32, vl);
+ vint8mf2_t q6v_2 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_2), 32, vl);
+ vint8mf2_t q6v_3 = __riscv_vsub_vx_i8mf2(__riscv_vreinterpret_v_u8mf2_i8mf2(q6h_3), 32, vl);
+
+ // load Q8 and take product
+ vint16m1_t p0 = __riscv_vwmul_vv_i16m1(q6v_0, __riscv_vle8_v_i8mf2(q8, vl), vl);
+ vint16m1_t p1 = __riscv_vwmul_vv_i16m1(q6v_1, __riscv_vle8_v_i8mf2(q8+16, vl), vl);
+ vint16m1_t p2 = __riscv_vwmul_vv_i16m1(q6v_2, __riscv_vle8_v_i8mf2(q8+32, vl), vl);
+ vint16m1_t p3 = __riscv_vwmul_vv_i16m1(q6v_3, __riscv_vle8_v_i8mf2(q8+48, vl), vl);
+
+ vint32m1_t vs_0 = __riscv_vwredsum_vs_i16m1_i32m1(p0, vzero, vl);
+ vint32m1_t vs_1 = __riscv_vwredsum_vs_i16m1_i32m1(p1, vzero, vl);
+ vint32m1_t vs_2 = __riscv_vwredsum_vs_i16m1_i32m1(p2, vzero, vl);
+ vint32m1_t vs_3 = __riscv_vwredsum_vs_i16m1_i32m1(p3, vzero, vl);
+
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_0) * scale[0];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_1) * scale[1];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_2) * scale[2];
+ isum += __riscv_vmv_x_s_i32m1_i32(vs_3) * scale[3];
+
+ sumf += isum * d_all * y[i].d;
+
+ }
+
+ *s = sumf;
+
+#else
+
+ int8_t aux8[QK_K];
+ int16_t aux16[8];
+ float sums [8];
+ int32_t aux32[8];
+ memset(sums, 0, 8*sizeof(float));
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * restrict q4 = x[i].ql;
+ const uint8_t * restrict qh = x[i].qh;
+ const int8_t * restrict q8 = y[i].qs;
+ memset(aux32, 0, 8*sizeof(int32_t));
+ int8_t * restrict a = aux8;
+ for (int l = 0; l < 16; ++l) {
+ a[l+ 0] = (int8_t)((q4[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32;
+ a[l+16] = (int8_t)((q4[l+16] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32;
+ a[l+32] = (int8_t)((q4[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32;
+ a[l+48] = (int8_t)((q4[l+16] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32;
+ }
+ int is = 0;
+ for (int j = 0; j < QK_K/16; ++j) {
+ int scale = x[i].scales[is++];
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ for (int l = 0; l < 8; ++l) aux16[l] = q8[l] * a[l];
+ for (int l = 0; l < 8; ++l) aux32[l] += scale * aux16[l];
+ q8 += 8; a += 8;
+ }
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ for (int l = 0; l < 8; ++l) sums[l] += d * aux32[l];
+ }
+ for (int l = 0; l < 8; ++l) sumf += sums[l];
+ *s = sumf;
+#endif
+}
+
+#endif
--- /dev/null
+#pragma once
+
+#include "ggml-impl.h"
+
+// GGML internal header
+
+#include <stdint.h>
+#include <stddef.h>
+
+#define QK4_0 32
+typedef struct {
+ ggml_fp16_t d; // delta
+ uint8_t qs[QK4_0 / 2]; // nibbles / quants
+} block_q4_0;
+static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
+
+#define QK4_1 32
+typedef struct {
+ ggml_fp16_t d; // delta
+ ggml_fp16_t m; // min
+ uint8_t qs[QK4_1 / 2]; // nibbles / quants
+} block_q4_1;
+static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding");
+
+#define QK5_0 32
+typedef struct {
+ ggml_fp16_t d; // delta
+ uint8_t qh[4]; // 5th bit of quants
+ uint8_t qs[QK5_0 / 2]; // nibbles / quants
+} block_q5_0;
+static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
+
+#define QK5_1 32
+typedef struct {
+ ggml_fp16_t d; // delta
+ ggml_fp16_t m; // min
+ uint8_t qh[4]; // 5th bit of quants
+ uint8_t qs[QK5_1 / 2]; // nibbles / quants
+} block_q5_1;
+static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
+
+#define QK8_0 32
+typedef struct {
+ ggml_fp16_t d; // delta
+ int8_t qs[QK8_0]; // quants
+} block_q8_0;
+static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
+
+#define QK8_1 32
+typedef struct {
+ float d; // delta
+ float s; // d * sum(qs[i])
+ int8_t qs[QK8_1]; // quants
+} block_q8_1;
+static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
+
+//
+// Super-block quantization structures
+//
+
+// Super-block size
+#ifdef GGML_QKK_64
+#define QK_K 64
+#define K_SCALE_SIZE 4
+#else
+#define QK_K 256
+#define K_SCALE_SIZE 12
+#endif
+
+// 2-bit quantization
+// weight is represented as x = a * q + b
+// 16 blocks of 16 elements each
+// Effectively 2.625 bits per weight
+typedef struct {
+ uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
+ uint8_t qs[QK_K/4]; // quants
+ ggml_fp16_t d; // super-block scale for quantized scales
+ ggml_fp16_t dmin; // super-block scale for quantized mins
+} block_q2_K;
+static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");
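+// size per 256 weights: 16 (scales) + 64 (qs) + 2 (d) + 2 (dmin) = 84 bytes,
+// i.e. 84*8/256 = 2.625 bits per weight, matching the figure above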
+
+// 3-bit quantization
+// weight is represented as x = a * q
+// 16 blocks of 16 elements each
+// Effectively 3.4375 bits per weight
+#ifdef GGML_QKK_64
+typedef struct {
+ uint8_t hmask[QK_K/8]; // quants - high bit
+ uint8_t qs[QK_K/4]; // quants - low 2 bits
+ uint8_t scales[2];
+ ggml_fp16_t d; // super-block scale
+} block_q3_K;
+static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 2, "wrong q3_K block size/padding");
+#else
+typedef struct {
+ uint8_t hmask[QK_K/8]; // quants - high bit
+ uint8_t qs[QK_K/4]; // quants - low 2 bits
+ uint8_t scales[12]; // scales, quantized with 6 bits
+ ggml_fp16_t d; // super-block scale
+} block_q3_K;
+static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + 12, "wrong q3_K block size/padding");
+#endif
+
+// 4-bit quantization
+// 8 blocks of 32 elements each
+// weight is represented as x = a * q + b
+// Effectively 4.5 bits per weight
+#ifdef GGML_QKK_64
+typedef struct {
+ ggml_fp16_t d[2]; // super-block scales/mins
+ uint8_t scales[2]; // 4-bit block scales/mins
+ uint8_t qs[QK_K/2]; // 4-bit quants
+} block_q4_K;
+static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + QK_K/2 + 2, "wrong q4_K block size/padding");
+#else
+typedef struct {
+ ggml_fp16_t d; // super-block scale for quantized scales
+ ggml_fp16_t dmin; // super-block scale for quantized mins
+ uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
+ uint8_t qs[QK_K/2]; // 4-bit quants
+} block_q4_K;
+static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2, "wrong q4_K block size/padding");
+#endif
+
+// 5-bit quantization
+// 8 blocks of 32 elements each
+// weight is represented as x = a * q + b
+// Effectively 5.5 bits per weight
+#ifdef GGML_QKK_64
+typedef struct {
+ ggml_fp16_t d; // super-block scale
+ int8_t scales[QK_K/16]; // 8-bit block scales
+ uint8_t qh[QK_K/8]; // quants, high bit
+ uint8_t qs[QK_K/2]; // quants, low 4 bits
+} block_q5_K;
+static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding");
+#else
+typedef struct {
+ ggml_fp16_t d; // super-block scale for quantized scales
+ ggml_fp16_t dmin; // super-block scale for quantized mins
+ uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
+ uint8_t qh[QK_K/8]; // quants, high bit
+ uint8_t qs[QK_K/2]; // quants, low 4 bits
+} block_q5_K;
+static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");
+#endif
+
+// 6-bit quantization
+// weight is represented as x = a * q
+// 16 blocks of 16 elements each
+// Effectively 6.5625 bits per weight
+typedef struct {
+ uint8_t ql[QK_K/2]; // quants, lower 4 bits
+ uint8_t qh[QK_K/4]; // quants, upper 2 bits
+ int8_t scales[QK_K/16]; // scales, quantized with 8 bits
+ ggml_fp16_t d; // super-block scale
+} block_q6_K;
+static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + QK_K / 16 + 3*QK_K/4, "wrong q6_K block size/padding");
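+// each weight is reconstructed as x = d * scales[g] * (q - 32), with the 6-bit
+// q reassembled from the split storage: q = (ql & 0xF) | (((qh >> shift) & 3) << 4)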
+
+// This is only used for intermediate quantization and dot products
+typedef struct {
+ float d; // delta
+ int8_t qs[QK_K]; // quants
+ int16_t bsums[QK_K/16]; // sum of quants in groups of 16
+} block_q8_K;
+static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding");
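+// bsums is what makes block-constant offsets cheap in the K-quant dot products:
+// a bias like q6_K's -32 costs one multiply per 16-group instead of one per element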
+
+
+// Quantization
+void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k);
+void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k);
+void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k);
+void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k);
+void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k);
+void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k);
+
+void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict y, int k);
+void quantize_row_q3_K_reference(const float * restrict x, block_q3_K * restrict y, int k);
+void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict y, int k);
+void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict y, int k);
+void quantize_row_q6_K_reference(const float * restrict x, block_q6_K * restrict y, int k);
+void quantize_row_q8_K_reference(const float * restrict x, block_q8_K * restrict y, int k);
+
+void quantize_row_q4_0(const float * restrict x, void * restrict y, int k);
+void quantize_row_q4_1(const float * restrict x, void * restrict y, int k);
+void quantize_row_q5_0(const float * restrict x, void * restrict y, int k);
+void quantize_row_q5_1(const float * restrict x, void * restrict y, int k);
+void quantize_row_q8_0(const float * restrict x, void * restrict y, int k);
+void quantize_row_q8_1(const float * restrict x, void * restrict y, int k);
+
+void quantize_row_q2_K(const float * restrict x, void * restrict y, int k);
+void quantize_row_q3_K(const float * restrict x, void * restrict y, int k);
+void quantize_row_q4_K(const float * restrict x, void * restrict y, int k);
+void quantize_row_q5_K(const float * restrict x, void * restrict y, int k);
+void quantize_row_q6_K(const float * restrict x, void * restrict y, int k);
+void quantize_row_q8_K(const float * restrict x, void * restrict y, int k);
+
+// Dequantization
+void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k);
+void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k);
+void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k);
+void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k);
+void dequantize_row_q8_0(const block_q8_0 * restrict x, float * restrict y, int k);
+//void dequantize_row_q8_1(const block_q8_1 * restrict x, float * restrict y, int k);
+
+void dequantize_row_q2_K(const block_q2_K * restrict x, float * restrict y, int k);
+void dequantize_row_q3_K(const block_q3_K * restrict x, float * restrict y, int k);
+void dequantize_row_q4_K(const block_q4_K * restrict x, float * restrict y, int k);
+void dequantize_row_q5_K(const block_q5_K * restrict x, float * restrict y, int k);
+void dequantize_row_q6_K(const block_q6_K * restrict x, float * restrict y, int k);
+void dequantize_row_q8_K(const block_q8_K * restrict x, float * restrict y, int k);
+
+// Dot product
+void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+
+void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, const void * restrict vx, const void * restrict vy);
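For orientation, the entry points above compose as quantize -> dequantize / dot.
A minimal, hypothetical round-trip sketch (not part of the patch; it assumes
ggml-quants.c is compiled and linked alongside):

    #include <math.h>
    #include <stdio.h>
    #include "ggml-quants.h"

    int main(void) {
        float src[QK8_0], out[QK8_0];
        for (int i = 0; i < QK8_0; ++i) {
            src[i] = 0.01f * (float)(i - 16); // arbitrary test data
        }

        // quantize one 32-element row to q8_0, then recover it
        block_q8_0 blk;
        quantize_row_q8_0_reference(src, &blk, QK8_0); // k must be a multiple of QK8_0
        dequantize_row_q8_0(&blk, out, QK8_0);

        float max_err = 0.0f;
        for (int i = 0; i < QK8_0; ++i) {
            max_err = fmaxf(max_err, fabsf(out[i] - src[i]));
        }

        // q8_0 x q8_0 dot product of the quantized row with itself
        float dot = 0.0f;
        ggml_vec_dot_q8_0_q8_0(QK8_0, &dot, &blk, &blk);

        printf("max round-trip error: %g, self dot: %g\n", max_err, dot);
        return 0;
    }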
#define _CRT_SECURE_NO_DEPRECATE // Disables ridiculous "unsafe" warnings on Windows
+#define _USE_MATH_DEFINES // For M_PI on MSVC
-#include "ggml.h"
-
-#ifdef GGML_USE_K_QUANTS
-#include "k_quants.h"
-#endif
+#include "ggml-impl.h"
+#include "ggml-quants.h"
#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW
#include <unistd.h>
#endif
-// static_assert should be a #define, but if it's not,
-// fall back to the _Static_assert C11 keyword.
-// if C99 - static_assert is noop
-// ref: https://stackoverflow.com/a/53923785/4039976
-#ifndef static_assert
-#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
-#define static_assert(cond, msg) _Static_assert(cond, msg)
-#else
-#define static_assert(cond, msg) struct global_scope_noop_trick
-#endif
-#endif
-
#if defined(_MSC_VER)
// disable "possible loss of data" to avoid hundreds of casts
// we should just be careful :)
static int pthread_join(pthread_t thread, void * unused) {
(void) unused;
- return (int) WaitForSingleObject(thread, INFINITE);
+ int ret = (int) WaitForSingleObject(thread, INFINITE);
+ CloseHandle(thread);
+ return ret;
}
static int sched_yield (void) {
#include <unistd.h>
#endif
+
#ifdef GGML_USE_CPU_HBM
#include <hbwmalloc.h>
#endif
-// __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
-#if defined(_MSC_VER) && (defined(__AVX2__) || defined(__AVX512F__))
-#ifndef __FMA__
-#define __FMA__
-#endif
-#ifndef __F16C__
-#define __F16C__
-#endif
-#ifndef __SSE3__
-#define __SSE3__
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
#endif
+
+#if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && \
+ (!defined(TARGET_OS_TV) && !defined(TARGET_OS_WATCH))
+
+#include <sys/wait.h>
+
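+// best-effort: spawning gdb requires it on PATH and permission to ptrace-attach
+// to the parent process (on Linux, yama/ptrace_scope may forbid this)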
+void ggml_print_backtrace(void) {
+ /*
+ #include <execinfo.h>
+ #include <dlfcn.h>
+
+ void * trace[100];
+
+ int nptrs = backtrace(trace, sizeof(trace)/sizeof(trace[0]));
+
+ backtrace_symbols_fd(trace, nptrs, STDERR_FILENO);
+ */
+
+ // backtrace_symbols does not show line numbers, use gdb instead
+ char attach[32];
+ snprintf(attach, sizeof(attach), "attach %d", getpid());
+ int pid = fork();
+ if (pid == 0) {
+ execlp("gdb", "gdb", "--batch",
+ "-ex", "set style enabled on",
+ "-ex", attach,
+ "-ex", "bt -frame-info source-and-location",
+ "-ex", "detach",
+ "-ex", "quit",
+ NULL);
+ } else {
+ waitpid(pid, NULL, 0);
+ }
+}
+#else
+void ggml_print_backtrace(void) {
+ // platform not supported
+}
#endif
+#undef MIN
+#undef MAX
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16
#define GGML_SOFT_MAX_UNROLL 4
#define GGML_VEC_DOT_UNROLL 2
+#define GGML_VEC_MAD_UNROLL 32
//
// logging
#define GGML_PRINT(...) printf(__VA_ARGS__)
+//
+// end of logging block
+//
+
#ifdef GGML_USE_ACCELERATE
// uncomment to use vDSP for soft max computation
// note: not sure if it is actually faster
//#define GGML_SOFT_MAX_ACCELERATE
#endif
-//
-// logging
-//
-
-#if (GGML_DEBUG >= 1)
-#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
-#else
-#define GGML_PRINT_DEBUG(...)
-#endif
-
-#if (GGML_DEBUG >= 5)
-#define GGML_PRINT_DEBUG_5(...) printf(__VA_ARGS__)
-#else
-#define GGML_PRINT_DEBUG_5(...)
-#endif
-
-#if (GGML_DEBUG >= 10)
-#define GGML_PRINT_DEBUG_10(...) printf(__VA_ARGS__)
-#else
-#define GGML_PRINT_DEBUG_10(...)
-#endif
-
-#define GGML_PRINT(...) printf(__VA_ARGS__)
-
-//
-// end of logging block
-//
-
#if defined(_MSC_VER) || defined(__MINGW32__)
#define GGML_ALIGNED_MALLOC(size) _aligned_malloc(size, GGML_MEM_ALIGN)
#define GGML_ALIGNED_FREE(ptr) _aligned_free(ptr)
//
#define GGML_TENSOR_UNARY_OP_LOCALS \
- GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne); \
- GGML_TENSOR_LOCALS(size_t, nb0, src0, nb); \
- GGML_TENSOR_LOCALS(int64_t, ne, dst, ne); \
- GGML_TENSOR_LOCALS(size_t, nb, dst, nb);
+ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
+ GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \
+ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \
+ GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
#define GGML_TENSOR_BINARY_OP_LOCALS \
- GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne); \
- GGML_TENSOR_LOCALS(size_t, nb0, src0, nb); \
- GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne); \
- GGML_TENSOR_LOCALS(size_t, nb1, src1, nb); \
- GGML_TENSOR_LOCALS(int64_t, ne, dst, ne); \
- GGML_TENSOR_LOCALS(size_t, nb, dst, nb);
+ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) \
+ GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) \
+ GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) \
+ GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) \
+ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) \
+ GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
#if defined(GGML_USE_ACCELERATE)
#include <Accelerate/Accelerate.h>
#include "ggml-opencl.h"
#endif
-#undef MIN
-#undef MAX
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
// floating point type used to accumulate sums
typedef double ggml_float;
-// 16-bit float
-// on Arm, we use __fp16
-// on x86, we use uint16_t
-#ifdef __ARM_NEON
-
-// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
-//
-// $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
-//
-#include <arm_neon.h>
-
-#define GGML_COMPUTE_FP16_TO_FP32(x) ((float) (x))
-#define GGML_COMPUTE_FP32_TO_FP16(x) (x)
-
-#define GGML_FP16_TO_FP32(x) ((float) (x))
-#define GGML_FP32_TO_FP16(x) (x)
-
-#else
-
-#ifdef __wasm_simd128__
-#include <wasm_simd128.h>
-#else
-#ifdef __POWER9_VECTOR__
-#include <altivec.h>
-#undef bool
-#define bool _Bool
-#else
-#if defined(_MSC_VER) || defined(__MINGW32__)
-#include <intrin.h>
-#else
-#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) || defined(__SSE3__)
-#if !defined(__riscv)
-#include <immintrin.h>
-#endif
-#endif
-#endif
-#endif
-#endif
-
-#ifdef __riscv_v_intrinsic
-#include <riscv_vector.h>
-#endif
-
-#ifdef __F16C__
-
-#ifdef _MSC_VER
-#define GGML_COMPUTE_FP16_TO_FP32(x) _mm_cvtss_f32(_mm_cvtph_ps(_mm_cvtsi32_si128(x)))
-#define GGML_COMPUTE_FP32_TO_FP16(x) _mm_extract_epi16(_mm_cvtps_ph(_mm_set_ss(x), 0), 0)
-#else
-#define GGML_COMPUTE_FP16_TO_FP32(x) _cvtsh_ss(x)
-#define GGML_COMPUTE_FP32_TO_FP16(x) _cvtss_sh(x, 0)
-#endif
-
-#elif defined(__POWER9_VECTOR__)
-
-#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
-#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
-/* the inline asm below is about 12% faster than the lookup method */
-#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
-#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
-
-static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
- register float f;
- register double d;
- __asm__(
- "mtfprd %0,%2\n"
- "xscvhpdp %0,%0\n"
- "frsp %1,%0\n" :
- /* temp */ "=d"(d),
- /* out */ "=f"(f):
- /* in */ "r"(h));
- return f;
-}
-
-static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
- register double d;
- register ggml_fp16_t r;
- __asm__( /* xscvdphp can work on double or single precision */
- "xscvdphp %0,%2\n"
- "mffprd %1,%0\n" :
- /* temp */ "=d"(d),
- /* out */ "=r"(r):
- /* in */ "f"(f));
- return r;
-}
-
-#else
-
-// FP16 <-> FP32
-// ref: https://github.com/Maratyszcza/FP16
-
-static inline float fp32_from_bits(uint32_t w) {
- union {
- uint32_t as_bits;
- float as_value;
- } fp32;
- fp32.as_bits = w;
- return fp32.as_value;
-}
-
-static inline uint32_t fp32_to_bits(float f) {
- union {
- float as_value;
- uint32_t as_bits;
- } fp32;
- fp32.as_value = f;
- return fp32.as_bits;
-}
-
-static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
- const uint32_t w = (uint32_t) h << 16;
- const uint32_t sign = w & UINT32_C(0x80000000);
- const uint32_t two_w = w + w;
-
- const uint32_t exp_offset = UINT32_C(0xE0) << 23;
-#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
- const float exp_scale = 0x1.0p-112f;
-#else
- const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
-#endif
- const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
-
- const uint32_t magic_mask = UINT32_C(126) << 23;
- const float magic_bias = 0.5f;
- const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;
-
- const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
- const uint32_t result = sign |
- (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
- return fp32_from_bits(result);
-}
-
-static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
-#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)
- const float scale_to_inf = 0x1.0p+112f;
- const float scale_to_zero = 0x1.0p-110f;
-#else
- const float scale_to_inf = fp32_from_bits(UINT32_C(0x77800000));
- const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
-#endif
- float base = (fabsf(f) * scale_to_inf) * scale_to_zero;
-
- const uint32_t w = fp32_to_bits(f);
- const uint32_t shl1_w = w + w;
- const uint32_t sign = w & UINT32_C(0x80000000);
- uint32_t bias = shl1_w & UINT32_C(0xFF000000);
- if (bias < UINT32_C(0x71000000)) {
- bias = UINT32_C(0x71000000);
- }
-
- base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
- const uint32_t bits = fp32_to_bits(base);
- const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
- const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
- const uint32_t nonsign = exp_bits + mantissa_bits;
- return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
-}
-
-#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
-#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)
-
-#endif // __F16C__
-
-#endif // __ARM_NEON
-
//
// global data
//
// precomputed gelu table for f16 (128 KB)
-static ggml_fp16_t table_gelu_f16[1 << 16];
+static ggml_fp16_t ggml_table_gelu_f16[1 << 16];
// precomputed quick gelu table for f16 (128 KB)
-static ggml_fp16_t table_gelu_quick_f16[1 << 16];
+static ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16];
// precomputed silu table for f16 (128 KB)
-static ggml_fp16_t table_silu_f16[1 << 16];
+static ggml_fp16_t ggml_table_silu_f16[1 << 16];
// precomputed exp table for f16 (128 KB)
-static ggml_fp16_t table_exp_f16[1 << 16];
-
-// precomputed f32 table for f16 (256 KB)
-static float table_f32_f16[1 << 16];
-
-#if defined(__ARM_NEON) || defined(__wasm_simd128__)
-#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s
-#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
-#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
-#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
-#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
-#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
-#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
-#define B8(c,s ) B7(c,s, c), B7(c,s, s)
-
-// precomputed tables for expanding 8bits to 8 bytes:
-static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
-static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
-#endif
-
-// On ARM NEON, it's quicker to directly convert x -> x instead of calling into ggml_lookup_fp16_to_fp32,
-// so we define GGML_FP16_TO_FP32 and GGML_FP32_TO_FP16 elsewhere for NEON.
-// This is also true for POWER9.
-#if !defined(GGML_FP16_TO_FP32) || !defined(GGML_FP32_TO_FP16)
-
-inline static float ggml_lookup_fp16_to_fp32(ggml_fp16_t f) {
- uint16_t s;
- memcpy(&s, &f, sizeof(uint16_t));
- return table_f32_f16[s];
-}
+static ggml_fp16_t ggml_table_exp_f16[1 << 16];
-#define GGML_FP16_TO_FP32(x) ggml_lookup_fp16_to_fp32(x)
-#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
-
-#endif
+// precomputed f32 table for f16 (256 KB) (ggml-impl.h)
+float ggml_table_f32_f16[1 << 16];
// note: do not use these inside ggml.c
// these are meant to be used via the ggml.h API
#define ggml_perf_cycles_per_ms() 0
#endif
-
//
// cache line
//
static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
-//
-// quantization
-//
-
-#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
-
-#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
-// multiply int8_t, add results pairwise twice
-static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
- // Get absolute values of x vectors
- const __m128i ax = _mm_sign_epi8(x, x);
- // Sign the values of the y vectors
- const __m128i sy = _mm_sign_epi8(y, x);
- // Perform multiplication and create 16-bit values
- const __m128i dot = _mm_maddubs_epi16(ax, sy);
- const __m128i ones = _mm_set1_epi16(1);
- return _mm_madd_epi16(ones, dot);
-}
-
-#if __AVX__ || __AVX2__ || __AVX512F__
-// horizontally add 8 floats
-static inline float hsum_float_8(const __m256 x) {
- __m128 res = _mm256_extractf128_ps(x, 1);
- res = _mm_add_ps(res, _mm256_castps256_ps128(x));
- res = _mm_add_ps(res, _mm_movehl_ps(res, res));
- res = _mm_add_ss(res, _mm_movehdup_ps(res));
- return _mm_cvtss_f32(res);
-}
-
-// horizontally add 8 int32_t
-static inline int hsum_i32_8(const __m256i a) {
- const __m128i sum128 = _mm_add_epi32(_mm256_castsi256_si128(a), _mm256_extractf128_si256(a, 1));
- const __m128i hi64 = _mm_unpackhi_epi64(sum128, sum128);
- const __m128i sum64 = _mm_add_epi32(hi64, sum128);
- const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
- return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
-}
-
-// horizontally add 4 int32_t
-static inline int hsum_i32_4(const __m128i a) {
- const __m128i hi64 = _mm_unpackhi_epi64(a, a);
- const __m128i sum64 = _mm_add_epi32(hi64, a);
- const __m128i hi32 = _mm_shuffle_epi32(sum64, _MM_SHUFFLE(2, 3, 0, 1));
- return _mm_cvtsi128_si32(_mm_add_epi32(sum64, hi32));
-}
-
-#if defined(__AVX2__) || defined(__AVX512F__)
-// spread 32 bits to 32 bytes { 0x00, 0xFF }
-static inline __m256i bytes_from_bits_32(const uint8_t * x) {
- uint32_t x32;
- memcpy(&x32, x, sizeof(uint32_t));
- const __m256i shuf_mask = _mm256_set_epi64x(
- 0x0303030303030303, 0x0202020202020202,
- 0x0101010101010101, 0x0000000000000000);
- __m256i bytes = _mm256_shuffle_epi8(_mm256_set1_epi32(x32), shuf_mask);
- const __m256i bit_mask = _mm256_set1_epi64x(0x7fbfdfeff7fbfdfe);
- bytes = _mm256_or_si256(bytes, bit_mask);
- return _mm256_cmpeq_epi8(bytes, _mm256_set1_epi64x(-1));
-}
-
-// Unpack 32 4-bit fields into 32 bytes
-// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
-static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
-{
- const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
- const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
- const __m256i lowMask = _mm256_set1_epi8( 0xF );
- return _mm256_and_si256(lowMask, bytes);
-}
-
-// add int16_t pairwise and return as float vector
-static inline __m256 sum_i16_pairs_float(const __m256i x) {
- const __m256i ones = _mm256_set1_epi16(1);
- const __m256i summed_pairs = _mm256_madd_epi16(ones, x);
- return _mm256_cvtepi32_ps(summed_pairs);
-}
-
-static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
-#if __AVXVNNI__
- const __m256i zero = _mm256_setzero_si256();
- const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy);
- return _mm256_cvtepi32_ps(summed_pairs);
-#else
- // Perform multiplication and create 16-bit values
- const __m256i dot = _mm256_maddubs_epi16(ax, sy);
- return sum_i16_pairs_float(dot);
-#endif
-}
-
-// multiply int8_t, add results pairwise twice and return as float vector
-static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
-#if __AVXVNNIINT8__
- const __m256i zero = _mm256_setzero_si256();
- const __m256i summed_pairs = _mm256_dpbssd_epi32(zero, x, y);
- return _mm256_cvtepi32_ps(summed_pairs);
-#else
- // Get absolute values of x vectors
- const __m256i ax = _mm256_sign_epi8(x, x);
- // Sign the values of the y vectors
- const __m256i sy = _mm256_sign_epi8(y, x);
- return mul_sum_us8_pairs_float(ax, sy);
-#endif
-}
-
-static inline __m128i packNibbles( __m256i bytes )
-{
- // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
-#if __AVX512F__
- const __m256i bytes_srli_4 = _mm256_srli_epi16(bytes, 4); // 0000_0000_abcd_0000
- bytes = _mm256_or_si256(bytes, bytes_srli_4); // 0000_abcd_abcd_efgh
- return _mm256_cvtepi16_epi8(bytes); // abcd_efgh
-#else
- const __m256i lowByte = _mm256_set1_epi16( 0xFF );
- __m256i high = _mm256_andnot_si256( lowByte, bytes );
- __m256i low = _mm256_and_si256( lowByte, bytes );
- high = _mm256_srli_epi16( high, 4 );
- bytes = _mm256_or_si256( low, high );
-
- // Compress uint16_t lanes into bytes
- __m128i r0 = _mm256_castsi256_si128( bytes );
- __m128i r1 = _mm256_extracti128_si256( bytes, 1 );
- return _mm_packus_epi16( r0, r1 );
-#endif
-}
-#elif defined(__AVX__)
-// spread 32 bits to 32 bytes { 0x00, 0xFF }
-static inline __m256i bytes_from_bits_32(const uint8_t * x) {
- uint32_t x32;
- memcpy(&x32, x, sizeof(uint32_t));
- const __m128i shuf_maskl = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
- const __m128i shuf_maskh = _mm_set_epi64x(0x0303030303030303, 0x0202020202020202);
- __m128i bytesl = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskl);
- __m128i bytesh = _mm_shuffle_epi8(_mm_set1_epi32(x32), shuf_maskh);
- const __m128i bit_mask = _mm_set1_epi64x(0x7fbfdfeff7fbfdfe);
- bytesl = _mm_or_si128(bytesl, bit_mask);
- bytesh = _mm_or_si128(bytesh, bit_mask);
- bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
- bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
- return MM256_SET_M128I(bytesh, bytesl);
-}
-
-// Unpack 32 4-bit fields into 32 bytes
-// The output vector contains 32 bytes, each one in [ 0 .. 15 ] interval
-static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
-{
- // Load 16 bytes from memory
- __m128i tmpl = _mm_loadu_si128((const __m128i *)rsi);
- __m128i tmph = _mm_srli_epi16(tmpl, 4);
- const __m128i lowMask = _mm_set1_epi8(0xF);
- tmpl = _mm_and_si128(lowMask, tmpl);
- tmph = _mm_and_si128(lowMask, tmph);
- return MM256_SET_M128I(tmph, tmpl);
-}
-
-// add int16_t pairwise and return as float vector
-static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
- const __m128i ones = _mm_set1_epi16(1);
- const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
- const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
- const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
- return _mm256_cvtepi32_ps(summed_pairs);
-}
-
-static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) {
- const __m128i axl = _mm256_castsi256_si128(ax);
- const __m128i axh = _mm256_extractf128_si256(ax, 1);
- const __m128i syl = _mm256_castsi256_si128(sy);
- const __m128i syh = _mm256_extractf128_si256(sy, 1);
- // Perform multiplication and create 16-bit values
- const __m128i dotl = _mm_maddubs_epi16(axl, syl);
- const __m128i doth = _mm_maddubs_epi16(axh, syh);
- return sum_i16_pairs_float(doth, dotl);
-}
-
-// multiply int8_t, add results pairwise twice and return as float vector
-static inline __m256 mul_sum_i8_pairs_float(const __m256i x, const __m256i y) {
- const __m128i xl = _mm256_castsi256_si128(x);
- const __m128i xh = _mm256_extractf128_si256(x, 1);
- const __m128i yl = _mm256_castsi256_si128(y);
- const __m128i yh = _mm256_extractf128_si256(y, 1);
- // Get absolute values of x vectors
- const __m128i axl = _mm_sign_epi8(xl, xl);
- const __m128i axh = _mm_sign_epi8(xh, xh);
- // Sign the values of the y vectors
- const __m128i syl = _mm_sign_epi8(yl, xl);
- const __m128i syh = _mm_sign_epi8(yh, xh);
- // Perform multiplication and create 16-bit values
- const __m128i dotl = _mm_maddubs_epi16(axl, syl);
- const __m128i doth = _mm_maddubs_epi16(axh, syh);
- return sum_i16_pairs_float(doth, dotl);
-}
-
-static inline __m128i packNibbles( __m128i bytes1, __m128i bytes2 )
-{
- // Move bits within 16-bit lanes from 0000_abcd_0000_efgh into 0000_0000_abcd_efgh
- const __m128i lowByte = _mm_set1_epi16( 0xFF );
- __m128i high = _mm_andnot_si128( lowByte, bytes1 );
- __m128i low = _mm_and_si128( lowByte, bytes1 );
- high = _mm_srli_epi16( high, 4 );
- bytes1 = _mm_or_si128( low, high );
- high = _mm_andnot_si128( lowByte, bytes2 );
- low = _mm_and_si128( lowByte, bytes2 );
- high = _mm_srli_epi16( high, 4 );
- bytes2 = _mm_or_si128( low, high );
-
- return _mm_packus_epi16( bytes1, bytes2);
-}
-#endif
-#elif defined(__SSSE3__)
-// horizontally add 4x4 floats
-static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) {
- __m128 res_0 =_mm_hadd_ps(a, b);
- __m128 res_1 =_mm_hadd_ps(c, d);
- __m128 res =_mm_hadd_ps(res_0, res_1);
- res =_mm_hadd_ps(res, res);
- res =_mm_hadd_ps(res, res);
-
- return _mm_cvtss_f32(res);
-}
-#endif // __AVX__ || __AVX2__ || __AVX512F__
-#endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
-
-#if defined(__ARM_NEON)
-
-#if !defined(__aarch64__)
-
-inline static int32_t vaddvq_s32(int32x4_t v) {
- return vgetq_lane_s32(v, 0) + vgetq_lane_s32(v, 1) + vgetq_lane_s32(v, 2) + vgetq_lane_s32(v, 3);
-}
-
-inline static float vaddvq_f32(float32x4_t v) {
- return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
-}
-
-inline static float vmaxvq_f32(float32x4_t v) {
- return
- MAX(MAX(vgetq_lane_f32(v, 0), vgetq_lane_f32(v, 1)),
- MAX(vgetq_lane_f32(v, 2), vgetq_lane_f32(v, 3)));
-}
-
-inline static int32x4_t vcvtnq_s32_f32(float32x4_t v) {
- int32x4_t res;
-
- res[0] = roundf(vgetq_lane_f32(v, 0));
- res[1] = roundf(vgetq_lane_f32(v, 1));
- res[2] = roundf(vgetq_lane_f32(v, 2));
- res[3] = roundf(vgetq_lane_f32(v, 3));
-
- return res;
-}
-
-#endif
-#endif
-
-#define QK4_0 32
-typedef struct {
- ggml_fp16_t d; // delta
- uint8_t qs[QK4_0 / 2]; // nibbles / quants
-} block_q4_0;
-static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");
-
-#define QK4_1 32
-typedef struct {
- ggml_fp16_t d; // delta
- ggml_fp16_t m; // min
- uint8_t qs[QK4_1 / 2]; // nibbles / quants
-} block_q4_1;
-static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_fp16_t) + QK4_1 / 2, "wrong q4_1 block size/padding");
-
-#define QK5_0 32
-typedef struct {
- ggml_fp16_t d; // delta
- uint8_t qh[4]; // 5th bit of quants
- uint8_t qs[QK5_0 / 2]; // nibbles / quants
-} block_q5_0;
-static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
-
-#define QK5_1 32
-typedef struct {
- ggml_fp16_t d; // delta
- ggml_fp16_t m; // min
- uint8_t qh[4]; // 5th bit of quants
- uint8_t qs[QK5_1 / 2]; // nibbles / quants
-} block_q5_1;
-static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
-
-#define QK8_0 32
-typedef struct {
- ggml_fp16_t d; // delta
- int8_t qs[QK8_0]; // quants
-} block_q8_0;
-static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
-
-#define QK8_1 32
-typedef struct {
- float d; // delta
- float s; // d * sum(qs[i])
- int8_t qs[QK8_1]; // quants
-} block_q8_1;
-static_assert(sizeof(block_q8_1) == 2*sizeof(float) + QK8_1, "wrong q8_1 block size/padding");
-
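For orientation, every block type above packs 32 weights behind a per-block scale (plus a minimum for the _1 variants), and the static_asserts pin the struct sizes, so the effective storage cost falls out directly. A minimal sketch, assuming the structs and QK* constants above are in scope (print_bits_per_weight is a hypothetical helper, not part of ggml):

    #include <stdio.h>

    // effective bits per weight implied by the block layouts above
    static void print_bits_per_weight(void) {
        printf("q4_0: %5.2f\n", 8.0*sizeof(block_q4_0)/QK4_0); // 18 bytes -> 4.50
        printf("q4_1: %5.2f\n", 8.0*sizeof(block_q4_1)/QK4_1); // 20 bytes -> 5.00
        printf("q5_0: %5.2f\n", 8.0*sizeof(block_q5_0)/QK5_0); // 22 bytes -> 5.50
        printf("q5_1: %5.2f\n", 8.0*sizeof(block_q5_1)/QK5_1); // 24 bytes -> 6.00
        printf("q8_0: %5.2f\n", 8.0*sizeof(block_q8_0)/QK8_0); // 34 bytes -> 8.50
        printf("q8_1: %5.2f\n", 8.0*sizeof(block_q8_1)/QK8_1); // 40 bytes -> 10.00
    }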
-// reference implementation for deterministic creation of model files
-static void quantize_row_q4_0_reference(const float * restrict x, block_q4_0 * restrict y, int k) {
- static const int qk = QK4_0;
-
- assert(k % qk == 0);
-
- const int nb = k / qk;
-
- for (int i = 0; i < nb; i++) {
- float amax = 0.0f; // absolute max
- float max = 0.0f;
-
- for (int j = 0; j < qk; j++) {
- const float v = x[i*qk + j];
- if (amax < fabsf(v)) {
- amax = fabsf(v);
- max = v;
- }
- }
-
- const float d = max / -8;
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = GGML_FP32_TO_FP16(d);
-
- for (int j = 0; j < qk/2; ++j) {
- const float x0 = x[i*qk + 0 + j]*id;
- const float x1 = x[i*qk + qk/2 + j]*id;
-
- const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
- const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
-
- y[i].qs[j] = xi0;
- y[i].qs[j] |= xi1 << 4;
- }
- }
-}
-
-static void quantize_row_q4_0(const float * restrict x, void * restrict y, int k) {
- quantize_row_q4_0_reference(x, y, k);
-}
-
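To make the q4_0 scale convention concrete: d = max/-8 anchors the signed extreme, the +8.5f bias rounds into [0, 16), and MIN(15, ...) clamps the overflow case. A minimal round-trip sketch, assuming the reference quantizer above and dequantize_row_q4_0 (defined further down in this hunk) are visible; the driver and its sine test data are hypothetical:

    #include <math.h>

    // quantize one 32-float row to q4_0 and measure the reconstruction error
    static void q4_0_roundtrip_demo(void) {
        float src[QK4_0], dst[QK4_0];
        for (int j = 0; j < QK4_0; ++j) {
            src[j] = sinf(0.1f*j); // arbitrary test data
        }

        block_q4_0 blk;
        quantize_row_q4_0_reference(src, &blk, QK4_0);
        dequantize_row_q4_0(&blk, dst, QK4_0);

        float max_err = 0.0f;
        for (int j = 0; j < QK4_0; ++j) {
            max_err = MAX(max_err, fabsf(src[j] - dst[j]));
        }
        // max_err is on the order of |d| = amax/8 (one quantization step)
    }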
-static void quantize_row_q4_1_reference(const float * restrict x, block_q4_1 * restrict y, int k) {
- const int qk = QK4_1;
-
- assert(k % qk == 0);
-
- const int nb = k / qk;
-
- for (int i = 0; i < nb; i++) {
- float min = FLT_MAX;
- float max = -FLT_MAX;
-
- for (int j = 0; j < qk; j++) {
- const float v = x[i*qk + j];
-
- if (v < min) min = v;
- if (v > max) max = v;
- }
-
- const float d = (max - min) / ((1 << 4) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = GGML_FP32_TO_FP16(d);
- y[i].m = GGML_FP32_TO_FP16(min);
-
- for (int j = 0; j < qk/2; ++j) {
- const float x0 = (x[i*qk + 0 + j] - min)*id;
- const float x1 = (x[i*qk + qk/2 + j] - min)*id;
-
- const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
- const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
-
- y[i].qs[j] = xi0;
- y[i].qs[j] |= xi1 << 4;
- }
- }
-}
-
-static void quantize_row_q4_1(const float * restrict x, void * restrict y, int k) {
- quantize_row_q4_1_reference(x, y, k);
-}
-
-static void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * restrict y, int k) {
- static const int qk = QK5_0;
-
- assert(k % qk == 0);
-
- const int nb = k / qk;
-
- for (int i = 0; i < nb; i++) {
- float amax = 0.0f; // absolute max
- float max = 0.0f;
-
- for (int j = 0; j < qk; j++) {
- const float v = x[i*qk + j];
- if (amax < fabsf(v)) {
- amax = fabsf(v);
- max = v;
- }
- }
-
- const float d = max / -16;
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = GGML_FP32_TO_FP16(d);
-
- uint32_t qh = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const float x0 = x[i*qk + 0 + j]*id;
- const float x1 = x[i*qk + qk/2 + j]*id;
-
- const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
- const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
-
- y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
-
- // get the 5th bit and store it in qh at the right position
- qh |= ((xi0 & 0x10) >> 4) << (j + 0);
- qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
- }
-
- memcpy(&y[i].qh, &qh, sizeof(qh));
- }
-}
-
-static void quantize_row_q5_0(const float * restrict x, void * restrict y, int k) {
- quantize_row_q5_0_reference(x, y, k);
-}
-
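The qh bookkeeping above is the only subtle part of q5_0: quant j keeps its low four bits in a nibble of qs[j] and its fifth bit in bit j of qh for the first half of the block, or bit j + 16 for the second half. A sketch of the inverse mapping for the first half, mirroring dequantize_row_q5_0 further down (the helper name is hypothetical):

    #include <string.h>

    // recover the signed value of quant j (0 <= j < QK5_0/2) of one block
    static inline int q5_0_unpack_lo(const block_q5_0 * b, int j) {
        uint32_t qh;
        memcpy(&qh, b->qh, sizeof(qh));
        const uint8_t xh = ((qh >> j) << 4) & 0x10; // fifth bit back into position 4
        return ((b->qs[j] & 0x0F) | xh) - 16;       // value in [-16, 15]
    }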
-static void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * restrict y, int k) {
- const int qk = QK5_1;
-
- assert(k % qk == 0);
-
- const int nb = k / qk;
-
- for (int i = 0; i < nb; i++) {
- float min = FLT_MAX;
- float max = -FLT_MAX;
-
- for (int j = 0; j < qk; j++) {
- const float v = x[i*qk + j];
-
- if (v < min) min = v;
- if (v > max) max = v;
- }
-
- const float d = (max - min) / ((1 << 5) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = GGML_FP32_TO_FP16(d);
- y[i].m = GGML_FP32_TO_FP16(min);
-
- uint32_t qh = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const float x0 = (x[i*qk + 0 + j] - min)*id;
- const float x1 = (x[i*qk + qk/2 + j] - min)*id;
-
- const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
- const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
-
- y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
-
- // get the 5-th bit and store it in qh at the right position
- qh |= ((xi0 & 0x10) >> 4) << (j + 0);
- qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
- }
-
- memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
- }
-}
-
-static void quantize_row_q5_1(const float * restrict x, void * restrict y, int k) {
- quantize_row_q5_1_reference(x, y, k);
-}
-
-// reference implementation for deterministic creation of model files
-static void quantize_row_q8_0_reference(const float * restrict x, block_q8_0 * restrict y, int k) {
- assert(k % QK8_0 == 0);
- const int nb = k / QK8_0;
-
- for (int i = 0; i < nb; i++) {
- float amax = 0.0f; // absolute max
-
- for (int j = 0; j < QK8_0; j++) {
- const float v = x[i*QK8_0 + j];
- amax = MAX(amax, fabsf(v));
- }
-
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = GGML_FP32_TO_FP16(d);
-
- for (int j = 0; j < QK8_0; ++j) {
- const float x0 = x[i*QK8_0 + j]*id;
-
- y[i].qs[j] = roundf(x0);
- }
- }
-}
-
-static void quantize_row_q8_0(const float * restrict x, void * restrict vy, int k) {
- assert(QK8_0 == 32);
- assert(k % QK8_0 == 0);
- const int nb = k / QK8_0;
-
- block_q8_0 * restrict y = vy;
-
-#if defined(__ARM_NEON)
- for (int i = 0; i < nb; i++) {
- float32x4_t srcv [8];
- float32x4_t asrcv[8];
- float32x4_t amaxv[8];
-
- for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
- for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
-
- for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
- for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
- for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
-
- const float amax = vmaxvq_f32(amaxv[0]);
-
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = GGML_FP32_TO_FP16(d);
-
- for (int j = 0; j < 8; j++) {
- const float32x4_t v = vmulq_n_f32(srcv[j], id);
- const int32x4_t vi = vcvtnq_s32_f32(v);
-
- y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
- y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
- y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
- y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
- }
- }
-#elif defined(__wasm_simd128__)
- for (int i = 0; i < nb; i++) {
- v128_t srcv [8];
- v128_t asrcv[8];
- v128_t amaxv[8];
-
- for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
- for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
-
- for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
- for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
- for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
-
- const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
- wasm_f32x4_extract_lane(amaxv[0], 1)),
- MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
- wasm_f32x4_extract_lane(amaxv[0], 3)));
-
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = GGML_FP32_TO_FP16(d);
-
- for (int j = 0; j < 8; j++) {
- const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
- const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
-
- y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
- y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
- y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
- y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
- }
- }
-#elif defined(__AVX2__) || defined(__AVX__)
- for (int i = 0; i < nb; i++) {
- // Load elements into 4 AVX vectors
- __m256 v0 = _mm256_loadu_ps( x );
- __m256 v1 = _mm256_loadu_ps( x + 8 );
- __m256 v2 = _mm256_loadu_ps( x + 16 );
- __m256 v3 = _mm256_loadu_ps( x + 24 );
- x += 32;
-
- // Compute max(abs(e)) for the block
- const __m256 signBit = _mm256_set1_ps( -0.0f );
- __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
-
- __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
- max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
- max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
- const float maxScalar = _mm_cvtss_f32( max4 );
-
- // Quantize these floats
- const float d = maxScalar / 127.f;
- y[i].d = GGML_FP32_TO_FP16(d);
- const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
- const __m256 mul = _mm256_set1_ps( id );
-
- // Apply the multiplier
- v0 = _mm256_mul_ps( v0, mul );
- v1 = _mm256_mul_ps( v1, mul );
- v2 = _mm256_mul_ps( v2, mul );
- v3 = _mm256_mul_ps( v3, mul );
-
- // Round to nearest integer
- v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
- v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
- v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
- v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
-
- // Convert floats to integers
- __m256i i0 = _mm256_cvtps_epi32( v0 );
- __m256i i1 = _mm256_cvtps_epi32( v1 );
- __m256i i2 = _mm256_cvtps_epi32( v2 );
- __m256i i3 = _mm256_cvtps_epi32( v3 );
-
-#if defined(__AVX2__)
- // Convert int32 to int16
- i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
- i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
- // Convert int16 to int8
- i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
-
- // We got our precious signed bytes, but the order is now wrong
- // These AVX2 pack instructions process 16-byte pieces independently
- // The following instruction fixes the order
- const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
- i0 = _mm256_permutevar8x32_epi32( i0, perm );
-
- _mm256_storeu_si256((__m256i *)y[i].qs, i0);
-#else
- // Since AVX lacks some of the necessary integer intrinsics,
- // we split the registers in half and use the SSE analogs of the AVX2 instructions
- __m128i ni0 = _mm256_castsi256_si128( i0 );
- __m128i ni1 = _mm256_extractf128_si256( i0, 1);
- __m128i ni2 = _mm256_castsi256_si128( i1 );
- __m128i ni3 = _mm256_extractf128_si256( i1, 1);
- __m128i ni4 = _mm256_castsi256_si128( i2 );
- __m128i ni5 = _mm256_extractf128_si256( i2, 1);
- __m128i ni6 = _mm256_castsi256_si128( i3 );
- __m128i ni7 = _mm256_extractf128_si256( i3, 1);
-
- // Convert int32 to int16
- ni0 = _mm_packs_epi32( ni0, ni1 );
- ni2 = _mm_packs_epi32( ni2, ni3 );
- ni4 = _mm_packs_epi32( ni4, ni5 );
- ni6 = _mm_packs_epi32( ni6, ni7 );
- // Convert int16 to int8
- ni0 = _mm_packs_epi16( ni0, ni2 );
- ni4 = _mm_packs_epi16( ni4, ni6 );
-
- _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
- _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
-#endif
- }
-#else
- // scalar
- quantize_row_q8_0_reference(x, y, k);
-#endif
-}
-
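The one non-obvious step in the AVX2 branch above is the final permute: _mm256_packs_epi32 and _mm256_packs_epi16 operate on each 128-bit lane independently, so after both packs the eight 4-byte groups of quants sit in the order {0, 2, 4, 6, 1, 3, 5, 7}. A scalar model of why perm = {0, 4, 1, 5, 2, 6, 3, 7} restores them (hypothetical check, not part of ggml):

    #include <assert.h>

    static void check_q8_0_pack_order(void) {
        const int packed[8] = {0, 2, 4, 6, 1, 3, 5, 7}; // dword groups after the two packs
        const int perm[8]   = {0, 4, 1, 5, 2, 6, 3, 7}; // argument to _mm256_permutevar8x32_epi32
        for (int k = 0; k < 8; ++k) {
            assert(packed[perm[k]] == k); // output dword k holds group k again
        }
    }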
-// reference implementation for deterministic creation of model files
-static void quantize_row_q8_1_reference(const float * restrict x, block_q8_1 * restrict y, int k) {
- assert(QK8_1 == 32);
- assert(k % QK8_1 == 0);
- const int nb = k / QK8_1;
-
- for (int i = 0; i < nb; i++) {
- float amax = 0.0f; // absolute max
-
- for (int j = 0; j < QK8_1; j++) {
- const float v = x[i*QK8_1 + j];
- amax = MAX(amax, fabsf(v));
- }
-
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = d;
-
- int sum = 0;
-
- for (int j = 0; j < QK8_1/2; ++j) {
- const float v0 = x[i*QK8_1 + j]*id;
- const float v1 = x[i*QK8_1 + QK8_1/2 + j]*id;
-
- y[i].qs[ j] = roundf(v0);
- y[i].qs[QK8_1/2 + j] = roundf(v1);
-
- sum += y[i].qs[ j];
- sum += y[i].qs[QK8_1/2 + j];
- }
-
- y[i].s = sum*d;
- }
-}
-
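A note on the stored s = d * sum(qs): q8_1 is the activation-side format for the asymmetric weight types (q4_1, q5_1), and precomputing the quant sum lets those dot kernels fold the weight block's minimum m into one multiply. With x_j = d_x*q_{x,j} + m_x and y_j ~ d_y*q_{y,j}, a short derivation (notation is mine, not the source's):

    \sum_j x_j y_j = d_x d_y \sum_j q_{x,j} q_{y,j} + m_x \sum_j y_j
                   \approx d_x d_y \sum_j q_{x,j} q_{y,j} + m_x s_y,
    \qquad s_y = d_y \sum_j q_{y,j}

This is exactly the summs += GGML_FP16_TO_FP32(x0->m) * y0->s term in ggml_vec_dot_q4_1_q8_1 and ggml_vec_dot_q5_1_q8_1 further down.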
-static void quantize_row_q8_1(const float * restrict x, void * restrict vy, int k) {
- assert(k % QK8_1 == 0);
- const int nb = k / QK8_1;
-
- block_q8_1 * restrict y = vy;
-
-#if defined(__ARM_NEON)
- for (int i = 0; i < nb; i++) {
- float32x4_t srcv [8];
- float32x4_t asrcv[8];
- float32x4_t amaxv[8];
-
- for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
- for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
-
- for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
- for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
- for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
-
- const float amax = vmaxvq_f32(amaxv[0]);
-
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = d;
-
- int32x4_t accv = vdupq_n_s32(0);
-
- for (int j = 0; j < 8; j++) {
- const float32x4_t v = vmulq_n_f32(srcv[j], id);
- const int32x4_t vi = vcvtnq_s32_f32(v);
-
- y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
- y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
- y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
- y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
-
- accv = vaddq_s32(accv, vi);
- }
-
- y[i].s = d * vaddvq_s32(accv);
- }
-#elif defined(__wasm_simd128__)
- for (int i = 0; i < nb; i++) {
- v128_t srcv [8];
- v128_t asrcv[8];
- v128_t amaxv[8];
-
- for (int j = 0; j < 8; j++) srcv[j] = wasm_v128_load(x + i*32 + 4*j);
- for (int j = 0; j < 8; j++) asrcv[j] = wasm_f32x4_abs(srcv[j]);
-
- for (int j = 0; j < 4; j++) amaxv[2*j] = wasm_f32x4_max(asrcv[2*j], asrcv[2*j+1]);
- for (int j = 0; j < 2; j++) amaxv[4*j] = wasm_f32x4_max(amaxv[4*j], amaxv[4*j+2]);
- for (int j = 0; j < 1; j++) amaxv[8*j] = wasm_f32x4_max(amaxv[8*j], amaxv[8*j+4]);
-
- const float amax = MAX(MAX(wasm_f32x4_extract_lane(amaxv[0], 0),
- wasm_f32x4_extract_lane(amaxv[0], 1)),
- MAX(wasm_f32x4_extract_lane(amaxv[0], 2),
- wasm_f32x4_extract_lane(amaxv[0], 3)));
-
- const float d = amax / ((1 << 7) - 1);
- const float id = d ? 1.0f/d : 0.0f;
-
- y[i].d = d;
-
- v128_t accv = wasm_i32x4_splat(0);
-
- for (int j = 0; j < 8; j++) {
- const v128_t v = wasm_f32x4_mul(srcv[j], wasm_f32x4_splat(id));
- const v128_t vi = wasm_i32x4_trunc_sat_f32x4(v);
-
- y[i].qs[4*j + 0] = wasm_i32x4_extract_lane(vi, 0);
- y[i].qs[4*j + 1] = wasm_i32x4_extract_lane(vi, 1);
- y[i].qs[4*j + 2] = wasm_i32x4_extract_lane(vi, 2);
- y[i].qs[4*j + 3] = wasm_i32x4_extract_lane(vi, 3);
-
- accv = wasm_i32x4_add(accv, vi);
- }
-
- y[i].s = d * (wasm_i32x4_extract_lane(accv, 0) +
- wasm_i32x4_extract_lane(accv, 1) +
- wasm_i32x4_extract_lane(accv, 2) +
- wasm_i32x4_extract_lane(accv, 3));
- }
-#elif defined(__AVX2__) || defined(__AVX__)
- for (int i = 0; i < nb; i++) {
- // Load elements into 4 AVX vectors
- __m256 v0 = _mm256_loadu_ps( x );
- __m256 v1 = _mm256_loadu_ps( x + 8 );
- __m256 v2 = _mm256_loadu_ps( x + 16 );
- __m256 v3 = _mm256_loadu_ps( x + 24 );
- x += 32;
-
- // Compute max(abs(e)) for the block
- const __m256 signBit = _mm256_set1_ps( -0.0f );
- __m256 maxAbs = _mm256_andnot_ps( signBit, v0 );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v1 ) );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v2 ) );
- maxAbs = _mm256_max_ps( maxAbs, _mm256_andnot_ps( signBit, v3 ) );
-
- __m128 max4 = _mm_max_ps( _mm256_extractf128_ps( maxAbs, 1 ), _mm256_castps256_ps128( maxAbs ) );
- max4 = _mm_max_ps( max4, _mm_movehl_ps( max4, max4 ) );
- max4 = _mm_max_ss( max4, _mm_movehdup_ps( max4 ) );
- const float maxScalar = _mm_cvtss_f32( max4 );
-
- // Quantize these floats
- const float d = maxScalar / 127.f;
- y[i].d = d;
- const float id = ( maxScalar != 0.0f ) ? 127.f / maxScalar : 0.0f;
- const __m256 mul = _mm256_set1_ps( id );
-
- // Apply the multiplier
- v0 = _mm256_mul_ps( v0, mul );
- v1 = _mm256_mul_ps( v1, mul );
- v2 = _mm256_mul_ps( v2, mul );
- v3 = _mm256_mul_ps( v3, mul );
-
- // Round to nearest integer
- v0 = _mm256_round_ps( v0, _MM_ROUND_NEAREST );
- v1 = _mm256_round_ps( v1, _MM_ROUND_NEAREST );
- v2 = _mm256_round_ps( v2, _MM_ROUND_NEAREST );
- v3 = _mm256_round_ps( v3, _MM_ROUND_NEAREST );
-
- // Convert floats to integers
- __m256i i0 = _mm256_cvtps_epi32( v0 );
- __m256i i1 = _mm256_cvtps_epi32( v1 );
- __m256i i2 = _mm256_cvtps_epi32( v2 );
- __m256i i3 = _mm256_cvtps_epi32( v3 );
-
-#if defined(__AVX2__)
- // Compute the sum of the quants and set y[i].s
- y[i].s = d * hsum_i32_8(_mm256_add_epi32(_mm256_add_epi32(i0, i1), _mm256_add_epi32(i2, i3)));
-
- // Convert int32 to int16
- i0 = _mm256_packs_epi32( i0, i1 ); // 0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6, 7, 12, 13, 14, 15
- i2 = _mm256_packs_epi32( i2, i3 ); // 16, 17, 18, 19, 24, 25, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31
- // Convert int16 to int8
- i0 = _mm256_packs_epi16( i0, i2 ); // 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27, 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31
-
- // We got our precious signed bytes, but the order is now wrong
- // These AVX2 pack instructions process 16-byte pieces independently
- // The following instruction fixes the order
- const __m256i perm = _mm256_setr_epi32( 0, 4, 1, 5, 2, 6, 3, 7 );
- i0 = _mm256_permutevar8x32_epi32( i0, perm );
-
- _mm256_storeu_si256((__m256i *)y[i].qs, i0);
-#else
- // Since AVX lacks some of the necessary integer intrinsics,
- // we split the registers in half and use the SSE analogs of the AVX2 instructions
- __m128i ni0 = _mm256_castsi256_si128( i0 );
- __m128i ni1 = _mm256_extractf128_si256( i0, 1);
- __m128i ni2 = _mm256_castsi256_si128( i1 );
- __m128i ni3 = _mm256_extractf128_si256( i1, 1);
- __m128i ni4 = _mm256_castsi256_si128( i2 );
- __m128i ni5 = _mm256_extractf128_si256( i2, 1);
- __m128i ni6 = _mm256_castsi256_si128( i3 );
- __m128i ni7 = _mm256_extractf128_si256( i3, 1);
-
- // Compute the sum of the quants and set y[i].s
- const __m128i s0 = _mm_add_epi32(_mm_add_epi32(ni0, ni1), _mm_add_epi32(ni2, ni3));
- const __m128i s1 = _mm_add_epi32(_mm_add_epi32(ni4, ni5), _mm_add_epi32(ni6, ni7));
- y[i].s = d * hsum_i32_4(_mm_add_epi32(s0, s1));
-
- // Convert int32 to int16
- ni0 = _mm_packs_epi32( ni0, ni1 );
- ni2 = _mm_packs_epi32( ni2, ni3 );
- ni4 = _mm_packs_epi32( ni4, ni5 );
- ni6 = _mm_packs_epi32( ni6, ni7 );
- // Convert int16 to int8
- ni0 = _mm_packs_epi16( ni0, ni2 );
- ni4 = _mm_packs_epi16( ni4, ni6 );
-
- _mm_storeu_si128((__m128i *)(y[i].qs + 0), ni0);
- _mm_storeu_si128((__m128i *)(y[i].qs + 16), ni4);
-#endif
- }
-#else
- // scalar
- quantize_row_q8_1_reference(x, y, k);
-#endif
-}
-
-static void dequantize_row_q4_0(const block_q4_0 * restrict x, float * restrict y, int k) {
- static const int qk = QK4_0;
-
- assert(k % qk == 0);
-
- const int nb = k / qk;
-
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
-
- for (int j = 0; j < qk/2; ++j) {
- const int x0 = (x[i].qs[j] & 0x0F) - 8;
- const int x1 = (x[i].qs[j] >> 4) - 8;
-
- y[i*qk + j + 0 ] = x0*d;
- y[i*qk + j + qk/2] = x1*d;
- }
- }
-}
-
-static void dequantize_row_q4_1(const block_q4_1 * restrict x, float * restrict y, int k) {
- static const int qk = QK4_1;
-
- assert(k % qk == 0);
-
- const int nb = k / qk;
-
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const float m = GGML_FP16_TO_FP32(x[i].m);
-
- for (int j = 0; j < qk/2; ++j) {
- const int x0 = (x[i].qs[j] & 0x0F);
- const int x1 = (x[i].qs[j] >> 4);
-
- y[i*qk + j + 0 ] = x0*d + m;
- y[i*qk + j + qk/2] = x1*d + m;
- }
- }
-}
-
-static void dequantize_row_q5_0(const block_q5_0 * restrict x, float * restrict y, int k) {
- static const int qk = QK5_0;
-
- assert(k % qk == 0);
-
- const int nb = k / qk;
-
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
-
- uint32_t qh;
- memcpy(&qh, x[i].qh, sizeof(qh));
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
-
- const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
- const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
-
- y[i*qk + j + 0 ] = x0*d;
- y[i*qk + j + qk/2] = x1*d;
- }
- }
-}
-
-static void dequantize_row_q5_1(const block_q5_1 * restrict x, float * restrict y, int k) {
- static const int qk = QK5_1;
-
- assert(k % qk == 0);
-
- const int nb = k / qk;
-
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const float m = GGML_FP16_TO_FP32(x[i].m);
-
- uint32_t qh;
- memcpy(&qh, x[i].qh, sizeof(qh));
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
-
- const int x0 = (x[i].qs[j] & 0x0F) | xh_0;
- const int x1 = (x[i].qs[j] >> 4) | xh_1;
-
- y[i*qk + j + 0 ] = x0*d + m;
- y[i*qk + j + qk/2] = x1*d + m;
- }
- }
-}
-
-static void dequantize_row_q8_0(const void * restrict vx, float * restrict y, int k) {
- static const int qk = QK8_0;
-
- assert(k % qk == 0);
-
- const int nb = k / qk;
-
- const block_q8_0 * restrict x = vx;
-
- for (int i = 0; i < nb; i++) {
- const float d = GGML_FP16_TO_FP32(x[i].d);
-
- for (int j = 0; j < qk; ++j) {
- y[i*qk + j] = x[i].qs[j]*d;
- }
- }
-}
-
-static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y);
-static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y);
-static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
-static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
-static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
-static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
-static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
+static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y);
+static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y);
static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
[GGML_TYPE_I8] = {
.vec_dot = ggml_vec_dot_q4_1_q8_1,
.vec_dot_type = GGML_TYPE_Q8_1,
},
+ [4] = { // GGML_TYPE_Q4_2
+ .type_name = "DEPRECATED",
+ .blck_size = 0,
+ .type_size = 0,
+ .is_quantized = false,
+ .to_float = NULL,
+ .from_float = NULL,
+ .from_float_reference = NULL,
+ .vec_dot = NULL,
+ .vec_dot_type = GGML_TYPE_COUNT,
+ },
+ [5] = { // GGML_TYPE_Q4_3
+ .type_name = "DEPRECATED",
+ .blck_size = 0,
+ .type_size = 0,
+ .is_quantized = false,
+ .to_float = NULL,
+ .from_float = NULL,
+ .from_float_reference = NULL,
+ .vec_dot = NULL,
+ .vec_dot_type = GGML_TYPE_COUNT,
+ },
[GGML_TYPE_Q5_0] = {
.type_name = "q5_0",
.blck_size = QK5_0,
.blck_size = QK8_0,
.type_size = sizeof(block_q8_0),
.is_quantized = true,
- .to_float = dequantize_row_q8_0,
+ .to_float = (ggml_to_float_t) dequantize_row_q8_0,
.from_float = quantize_row_q8_0,
.from_float_reference = (ggml_from_float_t) quantize_row_q8_0_reference,
.vec_dot = ggml_vec_dot_q8_0_q8_0,
.from_float_reference = (ggml_from_float_t) quantize_row_q8_1_reference,
.vec_dot_type = GGML_TYPE_Q8_1,
},
-#ifdef GGML_USE_K_QUANTS
[GGML_TYPE_Q2_K] = {
.type_name = "q2_K",
.blck_size = QK_K,
.is_quantized = true,
.from_float = quantize_row_q8_K,
}
-#endif
};
// For internal test use
return type_traits[type];
}
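The traits table ties every type to its conversion and dot-product kernels; vec_dot_type names the format the other operand must be quantized to (Q8_0 for the symmetric types, Q8_1 for the asymmetric ones, so the s term derived above is available). A minimal sketch of driving a conversion through the internal accessor whose closing lines appear above (the wrapper name is hypothetical):

    #include <stddef.h>

    // dequantize k values of any type that provides to_float
    static void to_float_any(enum ggml_type type, const void * src, float * dst, int k) {
        const ggml_type_traits_t traits = ggml_internal_get_type_traits(type);
        if (traits.to_float != NULL) {
            traits.to_float(src, dst, k);
        }
    }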
-
//
// simd mappings
//
+#if defined(__ARM_NEON)
+#if !defined(__aarch64__)
+
+// 64-bit compatibility: scalar fallbacks for intrinsics that exist only on AArch64
+
+inline static float vaddvq_f32(float32x4_t v) {
+ return vgetq_lane_f32(v, 0) + vgetq_lane_f32(v, 1) + vgetq_lane_f32(v, 2) + vgetq_lane_f32(v, 3);
+}
+
+#endif
+#endif
+
// we define a common set of C macros which map to specific intrinsics based on the current architecture
// we then implement the fundamental computation operations below using only these macros
// adding support for new architectures requires to define the corresponding SIMD macros
#define GGML_F16x8_ADD vaddq_f16
#define GGML_F16x8_MUL vmulq_f16
#define GGML_F16x8_REDUCE(res, x) \
- { \
+ do { \
int offset = GGML_F16_ARR >> 1; \
for (int i = 0; i < offset; ++i) { \
x[i] = vaddq_f16(x[i], x[offset+i]); \
const float32x4_t t0 = vcvt_f32_f16(vget_low_f16 (x[0])); \
const float32x4_t t1 = vcvt_f32_f16(vget_high_f16(x[0])); \
res = (ggml_float) vaddvq_f32(vaddq_f32(t0, t1)); \
- }
+ } while (0)
#define GGML_F16_VEC GGML_F16x8
#define GGML_F16_VEC_ZERO GGML_F16x8_ZERO
#define GGML_F32x8_ADD _mm256_add_ps
#define GGML_F32x8_MUL _mm256_mul_ps
#define GGML_F32x8_REDUCE(res, x) \
-{ \
+do { \
int offset = GGML_F32_ARR >> 1; \
for (int i = 0; i < offset; ++i) { \
x[i] = _mm256_add_ps(x[i], x[offset+i]); \
_mm256_extractf128_ps(x[0], 1)); \
const __m128 t1 = _mm_hadd_ps(t0, t0); \
res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
-}
+} while (0)
// TODO: is this optimal?
#define GGML_F32_VEC GGML_F32x8
#define GGML_F16_VEC_ZERO GGML_F32Cx4_ZERO
#define GGML_F16_VEC_SET1 GGML_F32Cx4_SET1
#define GGML_F16_VEC_LOAD(p, i) GGML_F32Cx4_LOAD(p)
-#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
-#define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
-#define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
-#define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
-#define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
-
-#endif
-
-// GGML_F32_ARR / GGML_F16_ARR
-// number of registers to use per step
-#ifdef GGML_SIMD
-#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
-#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
-#endif
-
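These constants fix the shape of every SIMD kernel that follows: each outer iteration consumes GGML_F32_STEP floats as GGML_F32_ARR independent vector accumulators of GGML_F32_EPR lanes each (independent dependency chains help hide FMA latency), and a scalar loop picks up the tail. An illustrative skeleton of that shape, as instantiated by ggml_vec_dot_f32 right below:

    // np = n & ~(GGML_F32_STEP - 1);              // largest multiple of STEP <= n
    // for (i = 0; i < np; i += GGML_F32_STEP)
    //     for (j = 0; j < GGML_F32_ARR; j++)      // ARR independent chains
    //         sum[j] = GGML_F32_VEC_FMA(sum[j], x_vec, y_vec);
    // GGML_F32_VEC_REDUCE(res, sum);              // fold the ARR accumulators
    // for (i = np; i < n; ++i) res += x[i]*y[i];  // leftovers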
-//
-// fundamental operations
-//
-
-inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
-inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
-inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
-inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
-inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
-inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
-inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
-inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; }
-inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
-inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; }
-inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
-inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
-inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
-inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
-
-static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
-#ifdef GGML_SIMD
- float sumf = 0.0f;
- const int np = (n & ~(GGML_F32_STEP - 1));
-
- GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
-
- GGML_F32_VEC ax[GGML_F32_ARR];
- GGML_F32_VEC ay[GGML_F32_ARR];
-
- for (int i = 0; i < np; i += GGML_F32_STEP) {
- for (int j = 0; j < GGML_F32_ARR; j++) {
- ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
- ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
-
- sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
- }
- }
-
- // reduce sum0..sum3 to sum0
- GGML_F32_VEC_REDUCE(sumf, sum);
-
- // leftovers
- for (int i = np; i < n; ++i) {
- sumf += x[i]*y[i];
- }
-#else
- // scalar
- ggml_float sumf = 0.0;
- for (int i = 0; i < n; ++i) {
- sumf += (ggml_float)(x[i]*y[i]);
- }
-#endif
-
- *s = sumf;
-}
-
-static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
- ggml_float sumf = 0.0;
-
-#if defined(GGML_SIMD)
- const int np = (n & ~(GGML_F16_STEP - 1));
-
- GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };
-
- GGML_F16_VEC ax[GGML_F16_ARR];
- GGML_F16_VEC ay[GGML_F16_ARR];
-
- for (int i = 0; i < np; i += GGML_F16_STEP) {
- for (int j = 0; j < GGML_F16_ARR; j++) {
- ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
- ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
-
- sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
- }
- }
-
- // reduce sum0..sum3 to sum0
- GGML_F16_VEC_REDUCE(sumf, sum);
-
- // leftovers
- for (int i = np; i < n; ++i) {
- sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
- }
-#else
- for (int i = 0; i < n; ++i) {
- sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
- }
-#endif
-
- *s = sumf;
-}
-
-static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const int qk = QK8_0;
- const int nb = n / qk;
-
- assert(n % qk == 0);
-
- const block_q4_0 * restrict x = vx;
- const block_q8_0 * restrict y = vy;
-
-#if defined(__ARM_NEON)
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- float32x4_t sumv1 = vdupq_n_f32(0.0f);
-
- GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
- for (int i = 0; i < nb; i += 2) {
- const block_q4_0 * restrict x0 = &x[i + 0];
- const block_q4_0 * restrict x1 = &x[i + 1];
- const block_q8_0 * restrict y0 = &y[i + 0];
- const block_q8_0 * restrict y1 = &y[i + 1];
-
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
- const int8x16_t s8b = vdupq_n_s8(0x8);
-
- const uint8x16_t v0_0 = vld1q_u8(x0->qs);
- const uint8x16_t v0_1 = vld1q_u8(x1->qs);
-
- // 4-bit -> 8-bit
- const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
- const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
- const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
- const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
-
- // sub 8
- const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
- const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
- const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
- const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
-
- // load y
- const int8x16_t v1_0l = vld1q_s8(y0->qs);
- const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
- const int8x16_t v1_1l = vld1q_s8(y1->qs);
- const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
-
-#if defined(__ARM_FEATURE_DOTPROD)
- // dot product into int32x4_t
- const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
- const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
-
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
-#else
- const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0ls), vget_low_s8 (v1_0l));
- const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l));
- const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hs), vget_low_s8 (v1_0h));
- const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h));
-
- const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1ls), vget_low_s8 (v1_1l));
- const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1ls), vget_high_s8(v1_1l));
- const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hs), vget_low_s8 (v1_1h));
- const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hs), vget_high_s8(v1_1h));
-
- const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
- const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
- const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
- const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
-
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
-#endif
- }
-
- *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
-#elif defined(__AVX2__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
-
- // Main loop
- for (int i = 0; i < nb; ++i) {
- /* Compute combined scale for the block */
- const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
-
- __m256i bx = bytes_from_nibbles_32(x[i].qs);
-
- // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
- const __m256i off = _mm256_set1_epi8( 8 );
- bx = _mm256_sub_epi8( bx, off );
-
- __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
-
- const __m256 q = mul_sum_i8_pairs_float(bx, by);
-
- /* Multiply q with scale and accumulate */
- acc = _mm256_fmadd_ps( d, q, acc );
- }
-
- *s = hsum_float_8(acc);
-#elif defined(__AVX__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
-
- // Main loop
- for (int i = 0; i < nb; ++i) {
- // Compute combined scale for the block
- const __m256 d = _mm256_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
-
- const __m128i lowMask = _mm_set1_epi8(0xF);
- const __m128i off = _mm_set1_epi8(8);
-
- const __m128i tmp = _mm_loadu_si128((const __m128i *)x[i].qs);
-
- __m128i bx = _mm_and_si128(lowMask, tmp);
- __m128i by = _mm_loadu_si128((const __m128i *)y[i].qs);
- bx = _mm_sub_epi8(bx, off);
- const __m128i i32_0 = mul_sum_i8_pairs(bx, by);
-
- bx = _mm_and_si128(lowMask, _mm_srli_epi64(tmp, 4));
- by = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
- bx = _mm_sub_epi8(bx, off);
- const __m128i i32_1 = mul_sum_i8_pairs(bx, by);
-
- // Convert int32_t to float
- __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));
-
- // Apply the scale, and accumulate
- acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
- }
-
- *s = hsum_float_8(acc);
-#elif defined(__SSSE3__)
- // set constants
- const __m128i lowMask = _mm_set1_epi8(0xF);
- const __m128i off = _mm_set1_epi8(8);
-
- // Initialize accumulator with zeros
- __m128 acc_0 = _mm_setzero_ps();
- __m128 acc_1 = _mm_setzero_ps();
- __m128 acc_2 = _mm_setzero_ps();
- __m128 acc_3 = _mm_setzero_ps();
-
- // First round without accumulation
- {
- _mm_prefetch(&x[0] + sizeof(block_q4_0), _MM_HINT_T0);
- _mm_prefetch(&y[0] + sizeof(block_q8_0), _MM_HINT_T0);
-
- // Compute combined scale for the block 0 and 1
- const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[0].d) * GGML_FP16_TO_FP32(y[0].d) );
-
- const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[0].qs);
-
- __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
- __m128i by_0 = _mm_loadu_si128((const __m128i *)y[0].qs);
- bx_0 = _mm_sub_epi8(bx_0, off);
- const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
-
- __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
- __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[0].qs + 16));
- bx_1 = _mm_sub_epi8(bx_1, off);
- const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
-
- _mm_prefetch(&x[1] + sizeof(block_q4_0), _MM_HINT_T0);
- _mm_prefetch(&y[1] + sizeof(block_q8_0), _MM_HINT_T0);
-
- // Compute combined scale for the block 2 and 3
- const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[1].d) * GGML_FP16_TO_FP32(y[1].d) );
-
- const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[1].qs);
-
- __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
- __m128i by_2 = _mm_loadu_si128((const __m128i *)y[1].qs);
- bx_2 = _mm_sub_epi8(bx_2, off);
- const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
-
- __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
- __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[1].qs + 16));
- bx_3 = _mm_sub_epi8(bx_3, off);
- const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
-
- // Convert int32_t to float
- __m128 p0 = _mm_cvtepi32_ps(i32_0);
- __m128 p1 = _mm_cvtepi32_ps(i32_1);
- __m128 p2 = _mm_cvtepi32_ps(i32_2);
- __m128 p3 = _mm_cvtepi32_ps(i32_3);
-
- // Apply the scale
- acc_0 = _mm_mul_ps( d_0_1, p0 );
- acc_1 = _mm_mul_ps( d_0_1, p1 );
- acc_2 = _mm_mul_ps( d_2_3, p2 );
- acc_3 = _mm_mul_ps( d_2_3, p3 );
- }
-
- // Main loop
- GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
- for (int i = 2; i < nb; i+=2) {
- _mm_prefetch(&x[i] + sizeof(block_q4_0), _MM_HINT_T0);
- _mm_prefetch(&y[i] + sizeof(block_q8_0), _MM_HINT_T0);
-
- // Compute combined scale for the block 0 and 1
- const __m128 d_0_1 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d) );
-
- const __m128i tmp_0_1 = _mm_loadu_si128((const __m128i *)x[i].qs);
-
- __m128i bx_0 = _mm_and_si128(lowMask, tmp_0_1);
- __m128i by_0 = _mm_loadu_si128((const __m128i *)y[i].qs);
- bx_0 = _mm_sub_epi8(bx_0, off);
- const __m128i i32_0 = mul_sum_i8_pairs(bx_0, by_0);
-
- __m128i bx_1 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_0_1, 4));
- __m128i by_1 = _mm_loadu_si128((const __m128i *)(y[i].qs + 16));
- bx_1 = _mm_sub_epi8(bx_1, off);
- const __m128i i32_1 = mul_sum_i8_pairs(bx_1, by_1);
-
- _mm_prefetch(&x[i] + 2 * sizeof(block_q4_0), _MM_HINT_T0);
- _mm_prefetch(&y[i] + 2 * sizeof(block_q8_0), _MM_HINT_T0);
-
- // Compute combined scale for the block 2 and 3
- const __m128 d_2_3 = _mm_set1_ps( GGML_FP16_TO_FP32(x[i + 1].d) * GGML_FP16_TO_FP32(y[i + 1].d) );
-
- const __m128i tmp_2_3 = _mm_loadu_si128((const __m128i *)x[i + 1].qs);
-
- __m128i bx_2 = _mm_and_si128(lowMask, tmp_2_3);
- __m128i by_2 = _mm_loadu_si128((const __m128i *)y[i + 1].qs);
- bx_2 = _mm_sub_epi8(bx_2, off);
- const __m128i i32_2 = mul_sum_i8_pairs(bx_2, by_2);
-
- __m128i bx_3 = _mm_and_si128(lowMask, _mm_srli_epi64(tmp_2_3, 4));
- __m128i by_3 = _mm_loadu_si128((const __m128i *)(y[i + 1].qs + 16));
- bx_3 = _mm_sub_epi8(bx_3, off);
- const __m128i i32_3 = mul_sum_i8_pairs(bx_3, by_3);
-
- // Convert int32_t to float
- __m128 p0 = _mm_cvtepi32_ps(i32_0);
- __m128 p1 = _mm_cvtepi32_ps(i32_1);
- __m128 p2 = _mm_cvtepi32_ps(i32_2);
- __m128 p3 = _mm_cvtepi32_ps(i32_3);
-
- // Apply the scale
- __m128 p0_d = _mm_mul_ps( d_0_1, p0 );
- __m128 p1_d = _mm_mul_ps( d_0_1, p1 );
- __m128 p2_d = _mm_mul_ps( d_2_3, p2 );
- __m128 p3_d = _mm_mul_ps( d_2_3, p3 );
-
- // Accumulate
- acc_0 = _mm_add_ps(p0_d, acc_0);
- acc_1 = _mm_add_ps(p1_d, acc_1);
- acc_2 = _mm_add_ps(p2_d, acc_2);
- acc_3 = _mm_add_ps(p3_d, acc_3);
- }
-
- *s = hsum_float_4x4(acc_0, acc_1, acc_2, acc_3);
-#elif defined(__riscv_v_intrinsic)
- float sumf = 0.0;
-
- size_t vl = __riscv_vsetvl_e8m1(qk/2);
-
- for (int i = 0; i < nb; i++) {
- vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
-
- vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
- vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
-
- vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
- vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
-
- vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
- vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
-
- vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 8, vl);
- vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 8, vl);
-
- vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
- vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
-
- vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
-
- vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
- vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
-
- int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
- sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
-
- sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
- }
-
- *s = sumf;
-#else
- // scalar
- float sumf = 0.0;
-
- for (int i = 0; i < nb; i++) {
- int sumi = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[i].qs[j] & 0x0F) - 8;
- const int v1 = (x[i].qs[j] >> 4) - 8;
-
- sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
- }
-
- sumf += sumi*GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d);
- }
-
- *s = sumf;
-#endif
-}
-
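All of the architecture branches above compute the same block-wise quantity; only the integer inner product is vectorized differently, and the scalar fallback is the reference. In my notation:

    s = \sum_{i=0}^{nb-1} d^{(x)}_i d^{(y)}_i
        \sum_{j=0}^{qk/2-1} \Big[ \big((q^{(x)}_{i,j} \,\&\, \mathrm{0x0F}) - 8\big)\, q^{(y)}_{i,j}
                                + \big((q^{(x)}_{i,j} \gg 4) - 8\big)\, q^{(y)}_{i,\,j+qk/2} \Big]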
-static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const int qk = QK8_1;
- const int nb = n / qk;
-
- assert(n % qk == 0);
-
- const block_q4_1 * restrict x = vx;
- const block_q8_1 * restrict y = vy;
-
- // TODO: add WASM SIMD
-#if defined(__ARM_NEON)
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- float32x4_t sumv1 = vdupq_n_f32(0.0f);
-
- float summs = 0;
-
- GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
- for (int i = 0; i < nb; i += 2) {
- const block_q4_1 * restrict x0 = &x[i + 0];
- const block_q4_1 * restrict x1 = &x[i + 1];
- const block_q8_1 * restrict y0 = &y[i + 0];
- const block_q8_1 * restrict y1 = &y[i + 1];
-
- summs += GGML_FP16_TO_FP32(x0->m) * y0->s + GGML_FP16_TO_FP32(x1->m) * y1->s;
-
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
-
- const uint8x16_t v0_0 = vld1q_u8(x0->qs);
- const uint8x16_t v0_1 = vld1q_u8(x1->qs);
-
- // 4-bit -> 8-bit
- const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
- const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
- const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
- const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
-
- // load y
- const int8x16_t v1_0l = vld1q_s8(y0->qs);
- const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
- const int8x16_t v1_1l = vld1q_s8(y1->qs);
- const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
-
-#if defined(__ARM_FEATURE_DOTPROD)
- // dot product into int32x4_t
- const int32x4_t p_0 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
- const int32x4_t p_1 = vdotq_s32(vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
-
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_FP16_TO_FP32(x0->d)*y0->d);
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_FP16_TO_FP32(x1->d)*y1->d);
-#else
- const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0l), vget_low_s8 (v1_0l));
- const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0l), vget_high_s8(v1_0l));
- const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0h), vget_low_s8 (v1_0h));
- const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0h), vget_high_s8(v1_0h));
-
- const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1l), vget_low_s8 (v1_1l));
- const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1l), vget_high_s8(v1_1l));
- const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1h), vget_low_s8 (v1_1h));
- const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1h), vget_high_s8(v1_1h));
-
- const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
- const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
- const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
- const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
-
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
-#endif
- }
-
- *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
-#elif defined(__AVX2__) || defined(__AVX__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
-
- float summs = 0;
-
- // Main loop
- for (int i = 0; i < nb; ++i) {
- const float d0 = GGML_FP16_TO_FP32(x[i].d);
- const float d1 = y[i].d;
-
- summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
-
- const __m256 d0v = _mm256_set1_ps( d0 );
- const __m256 d1v = _mm256_set1_ps( d1 );
-
- // Compute combined scales
- const __m256 d0d1 = _mm256_mul_ps( d0v, d1v );
-
- // Load 16 bytes, and unpack 4 bit fields into bytes, making 32 bytes
- const __m256i bx = bytes_from_nibbles_32(x[i].qs);
- const __m256i by = _mm256_loadu_si256( (const __m256i *)y[i].qs );
-
- const __m256 xy = mul_sum_us8_pairs_float(bx, by);
-
- // Accumulate d0*d1*x*y
-#if defined(__AVX2__)
- acc = _mm256_fmadd_ps( d0d1, xy, acc );
-#else
- acc = _mm256_add_ps( _mm256_mul_ps( d0d1, xy ), acc );
-#endif
- }
-
- *s = hsum_float_8(acc) + summs;
-#elif defined(__riscv_v_intrinsic)
- float sumf = 0.0;
-
- size_t vl = __riscv_vsetvl_e8m1(qk/2);
-
- for (int i = 0; i < nb; i++) {
- vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
-
- vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
- vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
-
- vuint8m1_t x_a = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
- vuint8m1_t x_l = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
-
- vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
- vint8m1_t v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
-
- vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
- vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
-
- vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
-
- vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
- vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
-
- int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
- sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
-
- sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
- }
-
- *s = sumf;
-#else
- // scalar
- float sumf = 0.0;
-
- for (int i = 0; i < nb; i++) {
- int sumi = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const int v0 = (x[i].qs[j] & 0x0F);
- const int v1 = (x[i].qs[j] >> 4);
-
- sumi += (v0 * y[i].qs[j]) + (v1 * y[i].qs[j + qk/2]);
- }
-
- sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
- }
-
- *s = sumf;
-#endif
-}
-
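A detail that separates the q4_1 AVX path from the q4_0 one: _mm256_maddubs_epi16 multiplies unsigned bytes by signed bytes. q4_1 quants already live in [0, 15], so mul_sum_us8_pairs_float feeds them in directly; q4_0 quants are signed after the -8 bias, so mul_sum_i8_pairs_float (removed near the top of this hunk) first takes |x| and moves the sign of x onto y with _mm_sign_epi8. A scalar check of that identity (hypothetical, not part of ggml):

    #include <assert.h>
    #include <stdlib.h>

    static void check_sign_trick(void) {
        for (int x = -8; x <= 7; ++x) {           // q4_0 value range
            for (int y = -127; y <= 127; ++y) {   // q8_0 quants never reach -128 (d = amax/127)
                const int ax = abs(x);                        // _mm_sign_epi8(x, x)
                const int sy = x < 0 ? -y : (x > 0 ? y : 0);  // _mm_sign_epi8(y, x)
                assert(ax*sy == x*y);
            }
        }
    }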
-static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const int qk = QK8_0;
- const int nb = n / qk;
-
- assert(n % qk == 0);
- assert(qk == QK5_0);
-
- const block_q5_0 * restrict x = vx;
- const block_q8_0 * restrict y = vy;
-
-#if defined(__ARM_NEON)
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- float32x4_t sumv1 = vdupq_n_f32(0.0f);
-
- uint32_t qh0;
- uint32_t qh1;
-
- uint64_t tmp0[4];
- uint64_t tmp1[4];
-
- GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
- for (int i = 0; i < nb; i += 2) {
- const block_q5_0 * restrict x0 = &x[i];
- const block_q5_0 * restrict x1 = &x[i + 1];
- const block_q8_0 * restrict y0 = &y[i];
- const block_q8_0 * restrict y1 = &y[i + 1];
-
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
-
- // extract the 5th bit via lookup table ((!b) << 4)
- memcpy(&qh0, x0->qh, sizeof(qh0));
- memcpy(&qh1, x1->qh, sizeof(qh1));
-
- tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
- tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
- tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
- tmp0[3] = table_b2b_1[(qh0 >> 24) ];
-
- tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
- tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
- tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
- tmp1[3] = table_b2b_1[(qh1 >> 24) ];
-
- const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
- const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
- const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
- const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
-
- const uint8x16_t v0_0 = vld1q_u8(x0->qs);
- const uint8x16_t v0_1 = vld1q_u8(x1->qs);
-
- // 4-bit -> 8-bit
- int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
- int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
- int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
- int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
-
- // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
- const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
- const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
- const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
- const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
-
- // load y
- const int8x16_t v1_0l = vld1q_s8(y0->qs);
- const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
- const int8x16_t v1_1l = vld1q_s8(y1->qs);
- const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
-
-#if defined(__ARM_FEATURE_DOTPROD)
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
- vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
- vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
- vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
- vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
-#else
- const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
- const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
- const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
- const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
-
- const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
- const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
- const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
- const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
-
- const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
- const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
- const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
- const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
-
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
-#endif
- }
-
- *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
-#elif defined(__wasm_simd128__)
- v128_t sumv = wasm_f32x4_splat(0.0f);
-
- uint32_t qh;
- uint64_t tmp[4];
-
- // TODO: check if unrolling this is better
- for (int i = 0; i < nb; ++i) {
- const block_q5_0 * restrict x0 = &x[i];
- const block_q8_0 * restrict y0 = &y[i];
-
- const v128_t m4b = wasm_i8x16_splat(0x0F);
-
- // extract the 5th bit
- memcpy(&qh, x0->qh, sizeof(qh));
-
- tmp[0] = table_b2b_1[(qh >> 0) & 0xFF];
- tmp[1] = table_b2b_1[(qh >> 8) & 0xFF];
- tmp[2] = table_b2b_1[(qh >> 16) & 0xFF];
- tmp[3] = table_b2b_1[(qh >> 24) ];
-
- const v128_t qhl = wasm_v128_load(tmp + 0);
- const v128_t qhh = wasm_v128_load(tmp + 2);
-
- const v128_t v0 = wasm_v128_load(x0->qs);
-
- // 4-bit -> 8-bit
- const v128_t v0l = wasm_v128_and (v0, m4b);
- const v128_t v0h = wasm_u8x16_shr(v0, 4);
-
- // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
- const v128_t v0lf = wasm_i8x16_sub(v0l, qhl);
- const v128_t v0hf = wasm_i8x16_sub(v0h, qhh);
-
- // load y
- const v128_t v1l = wasm_v128_load(y0->qs);
- const v128_t v1h = wasm_v128_load(y0->qs + 16);
-
- // int8x16 -> int16x8
- const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
- const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
- const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
- const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
-
- const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
- const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
- const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
- const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
-
- // dot product
- sumv = wasm_f32x4_add(sumv, wasm_f32x4_mul(wasm_f32x4_convert_i32x4(
- wasm_i32x4_add(
- wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
- wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
- wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
- wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
- wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * GGML_FP16_TO_FP32(y0->d))));
- }
-
- *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
- wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3);
-#elif defined(__AVX2__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
-
- // Main loop
- for (int i = 0; i < nb; i++) {
- /* Compute combined scale for the block */
- const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
-
- __m256i bx = bytes_from_nibbles_32(x[i].qs);
- __m256i bxhi = bytes_from_bits_32(x[i].qh);
- bxhi = _mm256_andnot_si256(bxhi, _mm256_set1_epi8((char)0xF0));
- bx = _mm256_or_si256(bx, bxhi);
-
- __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
-
- const __m256 q = mul_sum_i8_pairs_float(bx, by);
-
- /* Multiply q with scale and accumulate */
- acc = _mm256_fmadd_ps(d, q, acc);
- }
-
- *s = hsum_float_8(acc);
-#elif defined(__AVX__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
- __m128i mask = _mm_set1_epi8((char)0xF0);
-
- // Main loop
- for (int i = 0; i < nb; i++) {
- /* Compute combined scale for the block */
- const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
-
- __m256i bx = bytes_from_nibbles_32(x[i].qs);
- const __m256i bxhi = bytes_from_bits_32(x[i].qh);
- __m128i bxhil = _mm256_castsi256_si128(bxhi);
- __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
- bxhil = _mm_andnot_si128(bxhil, mask);
- bxhih = _mm_andnot_si128(bxhih, mask);
- __m128i bxl = _mm256_castsi256_si128(bx);
- __m128i bxh = _mm256_extractf128_si256(bx, 1);
- bxl = _mm_or_si128(bxl, bxhil);
- bxh = _mm_or_si128(bxh, bxhih);
- bx = MM256_SET_M128I(bxh, bxl);
-
- const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
-
- const __m256 q = mul_sum_i8_pairs_float(bx, by);
-
- /* Multiply q with scale and accumulate */
- acc = _mm256_add_ps(_mm256_mul_ps(d, q), acc);
- }
-
- *s = hsum_float_8(acc);
-#elif defined(__riscv_v_intrinsic)
- float sumf = 0.0;
-
- uint32_t qh;
-
- // lookup tables with the mask bits and shift amounts used below
- uint32_t temp_1[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
- uint32_t temp_2[16] = {0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80,
- 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000};
-
- size_t vl = __riscv_vsetvl_e8m1(qk/2);
-
- for (int i = 0; i < nb; i++) {
- memcpy(&qh, x[i].qh, sizeof(uint32_t));
-
- // temporary registers
- vuint32m4_t vt_1 = __riscv_vle32_v_u32m4(temp_2, vl);
- vuint32m4_t vt_2 = __riscv_vle32_v_u32m4(temp_1, vl);
- vuint32m4_t vt_3 = __riscv_vsll_vx_u32m4(vt_1, 16, vl);
- vuint32m4_t vt_4 = __riscv_vadd_vx_u32m4(vt_2, 12, vl);
-
- // ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
- vuint32m4_t xha_0 = __riscv_vand_vx_u32m4(vt_1, qh, vl);
- vuint32m4_t xhr_0 = __riscv_vsrl_vv_u32m4(xha_0, vt_2, vl);
- vuint32m4_t xhl_0 = __riscv_vsll_vx_u32m4(xhr_0, 4, vl);
-
- // ((qh & (1u << (j + 16))) >> (j + 12));
- vuint32m4_t xha_1 = __riscv_vand_vx_u32m4(vt_3, qh, vl);
- vuint32m4_t xhl_1 = __riscv_vsrl_vv_u32m4(xha_1, vt_4, vl);
-
- // narrowing
- vuint16m2_t xhc_0 = __riscv_vncvt_x_x_w_u16m2(xhl_0, vl);
- vuint8m1_t xh_0 = __riscv_vncvt_x_x_w_u8m1(xhc_0, vl);
-
- vuint16m2_t xhc_1 = __riscv_vncvt_x_x_w_u16m2(xhl_1, vl);
- vuint8m1_t xh_1 = __riscv_vncvt_x_x_w_u8m1(xhc_1, vl);
-
- // load
- vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
-
- vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
- vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
-
- vuint8m1_t x_at = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
- vuint8m1_t x_lt = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
-
- vuint8m1_t x_a = __riscv_vor_vv_u8m1(x_at, xh_0, vl);
- vuint8m1_t x_l = __riscv_vor_vv_u8m1(x_lt, xh_1, vl);
-
- vint8m1_t x_ai = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
- vint8m1_t x_li = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
-
- vint8m1_t v0 = __riscv_vsub_vx_i8m1(x_ai, 16, vl);
- vint8m1_t v1 = __riscv_vsub_vx_i8m1(x_li, 16, vl);
-
- vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
- vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
-
- vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
-
- vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
- vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
-
- int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
- sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
-
- sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
- }
-
- *s = sumf;
-#else
- // scalar
- float sumf = 0.0;
-
- for (int i = 0; i < nb; i++) {
- uint32_t qh;
- memcpy(&qh, x[i].qh, sizeof(qh));
-
- int sumi = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
- const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
-
- const int32_t x0 = ((x[i].qs[j] & 0x0F) | xh_0) - 16;
- const int32_t x1 = ((x[i].qs[j] >> 4) | xh_1) - 16;
-
- sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
- }
-
- sumf += (GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d)) * sumi;
- }
-
- *s = sumf;
-#endif
-}
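// [editorial note] A minimal standalone sketch (not part of the patch) of the
// Q5_0 reconstruction done by the scalar loop above: the low 4 bits of each
// weight come from a nibble of qs[], the 5th bit from the qh bitfield, and the
// result is re-centered by -16. Names here are illustrative.
#include <stdint.h>
#include <stdio.h>

static int32_t q5_0_unpack(uint8_t qs_byte, uint32_t qh, int j, int high_half) {
    const uint8_t nib = high_half ? (qs_byte >> 4) : (qs_byte & 0x0F);
    const int     bit = high_half ? j + 16 : j;    // which qh bit carries the 5th bit
    const uint8_t xh  = ((qh >> bit) & 1u) << 4;   // place it at bit position 4
    return (int32_t)(nib | xh) - 16;               // 5-bit value centered to [-16, 15]
}

int main(void) {
    // element j = 3, low half: nibble 0x0A with its 5th bit set -> (0x0A | 0x10) - 16 = 10
    printf("%d\n", q5_0_unpack(0x0A, 1u << 3, 3, 0));
    return 0;
}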
-
-static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const int qk = QK8_1;
- const int nb = n / qk;
-
- assert(n % qk == 0);
- assert(qk == QK5_1);
-
- const block_q5_1 * restrict x = vx;
- const block_q8_1 * restrict y = vy;
-
-#if defined(__ARM_NEON)
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- float32x4_t sumv1 = vdupq_n_f32(0.0f);
-
- float summs0 = 0.0f;
- float summs1 = 0.0f;
-
- uint32_t qh0;
- uint32_t qh1;
-
- uint64_t tmp0[4];
- uint64_t tmp1[4];
-
- GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
- for (int i = 0; i < nb; i += 2) {
- const block_q5_1 * restrict x0 = &x[i];
- const block_q5_1 * restrict x1 = &x[i + 1];
- const block_q8_1 * restrict y0 = &y[i];
- const block_q8_1 * restrict y1 = &y[i + 1];
-
- const uint8x16_t m4b = vdupq_n_u8(0x0F);
-
- summs0 += GGML_FP16_TO_FP32(x0->m) * y0->s;
- summs1 += GGML_FP16_TO_FP32(x1->m) * y1->s;
-
- // extract the 5th bit via lookup table ((b) << 4)
- memcpy(&qh0, x0->qh, sizeof(qh0));
- memcpy(&qh1, x1->qh, sizeof(qh1));
-
- tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
- tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
- tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
- tmp0[3] = table_b2b_0[(qh0 >> 24) ];
-
- tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
- tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
- tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
- tmp1[3] = table_b2b_0[(qh1 >> 24) ];
-
- const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
- const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
- const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
- const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
-
- const uint8x16_t v0_0 = vld1q_u8(x0->qs);
- const uint8x16_t v0_1 = vld1q_u8(x1->qs);
-
- // 4-bit -> 8-bit
- const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
- const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
- const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
- const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
-
- // add high bit
- const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
- const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
- const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
- const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
-
- // load y
- const int8x16_t v1_0l = vld1q_s8(y0->qs);
- const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
- const int8x16_t v1_1l = vld1q_s8(y1->qs);
- const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
-
-#if defined(__ARM_FEATURE_DOTPROD)
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
- vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
- vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_FP16_TO_FP32(x0->d)*y0->d);
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
- vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
- vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_FP16_TO_FP32(x1->d)*y1->d);
-#else
- const int16x8_t pl0l = vmull_s8(vget_low_s8 (v0_0lf), vget_low_s8 (v1_0l));
- const int16x8_t pl0h = vmull_s8(vget_high_s8(v0_0lf), vget_high_s8(v1_0l));
- const int16x8_t ph0l = vmull_s8(vget_low_s8 (v0_0hf), vget_low_s8 (v1_0h));
- const int16x8_t ph0h = vmull_s8(vget_high_s8(v0_0hf), vget_high_s8(v1_0h));
-
- const int16x8_t pl1l = vmull_s8(vget_low_s8 (v0_1lf), vget_low_s8 (v1_1l));
- const int16x8_t pl1h = vmull_s8(vget_high_s8(v0_1lf), vget_high_s8(v1_1l));
- const int16x8_t ph1l = vmull_s8(vget_low_s8 (v0_1hf), vget_low_s8 (v1_1h));
- const int16x8_t ph1h = vmull_s8(vget_high_s8(v0_1hf), vget_high_s8(v1_1h));
-
- const int32x4_t pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h));
- const int32x4_t ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h));
- const int32x4_t pl1 = vaddq_s32(vpaddlq_s16(pl1l), vpaddlq_s16(pl1h));
- const int32x4_t ph1 = vaddq_s32(vpaddlq_s16(ph1l), vpaddlq_s16(ph1h));
-
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), GGML_FP16_TO_FP32(x0->d)*y0->d);
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(pl1, ph1)), GGML_FP16_TO_FP32(x1->d)*y1->d);
-#endif
- }
-
- *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
-#elif defined(__wasm_simd128__)
- v128_t sumv = wasm_f32x4_splat(0.0f);
-
- float summs = 0.0f;
-
- uint32_t qh;
- uint64_t tmp[4];
-
- // TODO: check if unrolling this is better
- for (int i = 0; i < nb; ++i) {
- const block_q5_1 * restrict x0 = &x[i];
- const block_q8_1 * restrict y0 = &y[i];
-
- summs += GGML_FP16_TO_FP32(x0->m) * y0->s;
-
- const v128_t m4b = wasm_i8x16_splat(0x0F);
-
- // extract the 5th bit
- memcpy(&qh, x0->qh, sizeof(qh));
-
- tmp[0] = table_b2b_0[(qh >> 0) & 0xFF];
- tmp[1] = table_b2b_0[(qh >> 8) & 0xFF];
- tmp[2] = table_b2b_0[(qh >> 16) & 0xFF];
- tmp[3] = table_b2b_0[(qh >> 24) ];
-
- const v128_t qhl = wasm_v128_load(tmp + 0);
- const v128_t qhh = wasm_v128_load(tmp + 2);
-
- const v128_t v0 = wasm_v128_load(x0->qs);
-
- // 4-bit -> 8-bit
- const v128_t v0l = wasm_v128_and (v0, m4b);
- const v128_t v0h = wasm_u8x16_shr(v0, 4);
-
- // add high bit
- const v128_t v0lf = wasm_v128_or(v0l, qhl);
- const v128_t v0hf = wasm_v128_or(v0h, qhh);
-
- // load y
- const v128_t v1l = wasm_v128_load(y0->qs);
- const v128_t v1h = wasm_v128_load(y0->qs + 16);
-
- // int8x16 -> int16x8
- const v128_t v0lfl = wasm_i16x8_extend_low_i8x16 (v0lf);
- const v128_t v0lfh = wasm_i16x8_extend_high_i8x16(v0lf);
- const v128_t v0hfl = wasm_i16x8_extend_low_i8x16 (v0hf);
- const v128_t v0hfh = wasm_i16x8_extend_high_i8x16(v0hf);
-
- const v128_t v1ll = wasm_i16x8_extend_low_i8x16 (v1l);
- const v128_t v1lh = wasm_i16x8_extend_high_i8x16(v1l);
- const v128_t v1hl = wasm_i16x8_extend_low_i8x16 (v1h);
- const v128_t v1hh = wasm_i16x8_extend_high_i8x16(v1h);
-
- // dot product
- sumv = wasm_f32x4_add(sumv,
- wasm_f32x4_mul(wasm_f32x4_convert_i32x4(wasm_i32x4_add(
- wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0lfl, v1ll),
- wasm_i32x4_dot_i16x8(v0lfh, v1lh)),
- wasm_i32x4_add(wasm_i32x4_dot_i16x8(v0hfl, v1hl),
- wasm_i32x4_dot_i16x8(v0hfh, v1hh)))),
- wasm_f32x4_splat(GGML_FP16_TO_FP32(x0->d) * y0->d)));
- }
-
- *s = wasm_f32x4_extract_lane(sumv, 0) + wasm_f32x4_extract_lane(sumv, 1) +
- wasm_f32x4_extract_lane(sumv, 2) + wasm_f32x4_extract_lane(sumv, 3) + summs;
-#elif defined(__AVX2__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
-
- float summs = 0.0f;
-
- // Main loop
- for (int i = 0; i < nb; i++) {
- const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
-
- summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
-
- __m256i bx = bytes_from_nibbles_32(x[i].qs);
- __m256i bxhi = bytes_from_bits_32(x[i].qh);
- bxhi = _mm256_and_si256(bxhi, _mm256_set1_epi8(0x10));
- bx = _mm256_or_si256(bx, bxhi);
-
- const __m256 dy = _mm256_set1_ps(y[i].d);
- const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
-
- const __m256 q = mul_sum_us8_pairs_float(bx, by);
-
- acc = _mm256_fmadd_ps(q, _mm256_mul_ps(dx, dy), acc);
- }
-
- *s = hsum_float_8(acc) + summs;
-#elif defined(__AVX__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
- __m128i mask = _mm_set1_epi8(0x10);
-
- float summs = 0.0f;
-
- // Main loop
- for (int i = 0; i < nb; i++) {
- const __m256 dx = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d));
-
- summs += GGML_FP16_TO_FP32(x[i].m) * y[i].s;
-
- __m256i bx = bytes_from_nibbles_32(x[i].qs);
- const __m256i bxhi = bytes_from_bits_32(x[i].qh);
- __m128i bxhil = _mm256_castsi256_si128(bxhi);
- __m128i bxhih = _mm256_extractf128_si256(bxhi, 1);
- bxhil = _mm_and_si128(bxhil, mask);
- bxhih = _mm_and_si128(bxhih, mask);
- __m128i bxl = _mm256_castsi256_si128(bx);
- __m128i bxh = _mm256_extractf128_si256(bx, 1);
- bxl = _mm_or_si128(bxl, bxhil);
- bxh = _mm_or_si128(bxh, bxhih);
- bx = MM256_SET_M128I(bxh, bxl);
-
- const __m256 dy = _mm256_set1_ps(y[i].d);
- const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
-
- const __m256 q = mul_sum_us8_pairs_float(bx, by);
-
- acc = _mm256_add_ps(_mm256_mul_ps(q, _mm256_mul_ps(dx, dy)), acc);
- }
-
- *s = hsum_float_8(acc) + summs;
-#elif defined(__riscv_v_intrinsic)
- float sumf = 0.0;
-
- uint32_t qh;
-
- // These temp values are for shift operations
- uint32_t temp_1[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
-
- size_t vl = __riscv_vsetvl_e8m1(qk/2);
-
- for (int i = 0; i < nb; i++) {
- memcpy(&qh, x[i].qh, sizeof(uint32_t));
-
- // temporary registers
- vuint32m4_t vt_1 = __riscv_vle32_v_u32m4(temp_1, vl);
- vuint32m4_t vt_2 = __riscv_vadd_vx_u32m4(vt_1, 12, vl);
+#define GGML_F16_VEC_STORE(p, r, i) GGML_F32Cx4_STORE(p, r[i])
+#define GGML_F16_VEC_FMA GGML_F32Cx4_FMA
+#define GGML_F16_VEC_ADD GGML_F32Cx4_ADD
+#define GGML_F16_VEC_MUL GGML_F32Cx4_MUL
+#define GGML_F16_VEC_REDUCE GGML_F32Cx4_REDUCE
- // load qh
- vuint32m4_t vqh = __riscv_vmv_v_x_u32m4(qh, vl);
+#endif
- // ((qh >> (j + 0)) << 4) & 0x10;
- vuint32m4_t xhr_0 = __riscv_vsrl_vv_u32m4(vqh, vt_1, vl);
- vuint32m4_t xhl_0 = __riscv_vsll_vx_u32m4(xhr_0, 4, vl);
- vuint32m4_t xha_0 = __riscv_vand_vx_u32m4(xhl_0, 0x10, vl);
+// GGML_F32_ARR / GGML_F16_ARR
+// number of registers to use per step
+#ifdef GGML_SIMD
+#define GGML_F32_ARR (GGML_F32_STEP/GGML_F32_EPR)
+#define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR)
+#endif
- // ((qh >> (j + 12)) ) & 0x10;
- vuint32m4_t xhr_1 = __riscv_vsrl_vv_u32m4(vqh, vt_2, vl);
- vuint32m4_t xha_1 = __riscv_vand_vx_u32m4(xhr_1, 0x10, vl);
+//
+// fundamental operations
+//
- // narrowing
- vuint16m2_t xhc_0 = __riscv_vncvt_x_x_w_u16m2(xha_0, vl);
- vuint8m1_t xh_0 = __riscv_vncvt_x_x_w_u8m1(xhc_0, vl);
+inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
- vuint16m2_t xhc_1 = __riscv_vncvt_x_x_w_u16m2(xha_1, vl);
- vuint8m1_t xh_1 = __riscv_vncvt_x_x_w_u8m1(xhc_1, vl);
+inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
- // load
- vuint8m1_t tx = __riscv_vle8_v_u8m1(x[i].qs, vl);
+inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
- vint8m1_t y0 = __riscv_vle8_v_i8m1(y[i].qs, vl);
- vint8m1_t y1 = __riscv_vle8_v_i8m1(y[i].qs+16, vl);
+inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
- vuint8m1_t x_at = __riscv_vand_vx_u8m1(tx, 0x0F, vl);
- vuint8m1_t x_lt = __riscv_vsrl_vx_u8m1(tx, 0x04, vl);
+inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
+inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
+inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
+inline static void ggml_vec_acc1_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] += v; }
+inline static void ggml_vec_sub_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] - y[i]; }
+inline static void ggml_vec_set_f32 (const int n, float * x, const float v) { for (int i = 0; i < n; ++i) x[i] = v; }
+inline static void ggml_vec_cpy_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i]; }
+inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = -x[i]; }
+inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
+inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
- vuint8m1_t x_a = __riscv_vor_vv_u8m1(x_at, xh_0, vl);
- vuint8m1_t x_l = __riscv_vor_vv_u8m1(x_lt, xh_1, vl);
+static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
+#ifdef GGML_SIMD
+ float sumf = 0.0f;
+ const int np = (n & ~(GGML_F32_STEP - 1));
- vint8m1_t v0 = __riscv_vreinterpret_v_u8m1_i8m1(x_a);
- vint8m1_t v1 = __riscv_vreinterpret_v_u8m1_i8m1(x_l);
+ GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO };
- vint16m2_t vec_mul1 = __riscv_vwmul_vv_i16m2(v0, y0, vl);
- vint16m2_t vec_mul2 = __riscv_vwmul_vv_i16m2(v1, y1, vl);
+ GGML_F32_VEC ax[GGML_F32_ARR];
+ GGML_F32_VEC ay[GGML_F32_ARR];
- vint32m1_t vec_zero = __riscv_vmv_v_x_i32m1(0, vl);
+ for (int i = 0; i < np; i += GGML_F32_STEP) {
+ for (int j = 0; j < GGML_F32_ARR; j++) {
+ ax[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
+ ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
- vint32m1_t vs1 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul1, vec_zero, vl);
- vint32m1_t vs2 = __riscv_vwredsum_vs_i16m2_i32m1(vec_mul2, vec_zero, vl);
+ sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], ay[j]);
+ }
+ }
- int sumi = __riscv_vmv_x_s_i32m1_i32(vs1);
- sumi += __riscv_vmv_x_s_i32m1_i32(vs2);
+ // reduce sum0..sum3 to sum0
+ GGML_F32_VEC_REDUCE(sumf, sum);
- sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
+ // leftovers
+ for (int i = np; i < n; ++i) {
+ sumf += x[i]*y[i];
}
-
- *s = sumf;
#else
// scalar
- float sumf = 0.0;
-
- for (int i = 0; i < nb; i++) {
- uint32_t qh;
- memcpy(&qh, x[i].qh, sizeof(qh));
-
- int sumi = 0;
-
- for (int j = 0; j < qk/2; ++j) {
- const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
- const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
-
- const int32_t x0 = (x[i].qs[j] & 0xF) | xh_0;
- const int32_t x1 = (x[i].qs[j] >> 4) | xh_1;
-
- sumi += (x0 * y[i].qs[j]) + (x1 * y[i].qs[j + qk/2]);
- }
-
- sumf += (GGML_FP16_TO_FP32(x[i].d)*y[i].d)*sumi + GGML_FP16_TO_FP32(x[i].m)*y[i].s;
+ ggml_float sumf = 0.0;
+ for (int i = 0; i < n; ++i) {
+ sumf += (ggml_float)(x[i]*y[i]);
}
+#endif
*s = sumf;
-#endif
}
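// [editorial note] The `n & ~(GGML_F32_STEP - 1)` idiom above rounds n down to
// a multiple of the SIMD step (valid because the step is a power of two), so
// the main loop is fully vectorized and only the tail runs scalar. A small
// standalone check, with STEP standing in for GGML_F32_STEP:
#include <assert.h>

enum { STEP = 32 };                        // e.g. 4 registers x 8 f32 lanes on AVX

static int round_down_to_step(int n) { return n & ~(STEP - 1); }

int main(void) {
    assert(round_down_to_step(100) == 96); // 96 elements vectorized, 4 leftovers
    assert(round_down_to_step(32)  == 32);
    assert(round_down_to_step(31)  == 0);  // all-scalar for short vectors
    return 0;
}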
-static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy) {
- const int qk = QK8_0;
- const int nb = n / qk;
-
- assert(n % qk == 0);
-
- const block_q8_0 * restrict x = vx;
- const block_q8_0 * restrict y = vy;
-
-#if defined(__ARM_NEON)
- float32x4_t sumv0 = vdupq_n_f32(0.0f);
- float32x4_t sumv1 = vdupq_n_f32(0.0f);
-
- GGML_ASSERT(nb % 2 == 0); // TODO: handle odd nb
- for (int i = 0; i < nb; i += 2) {
- const block_q8_0 * restrict x0 = &x[i + 0];
- const block_q8_0 * restrict x1 = &x[i + 1];
- const block_q8_0 * restrict y0 = &y[i + 0];
- const block_q8_0 * restrict y1 = &y[i + 1];
-
- const int8x16_t x0_0 = vld1q_s8(x0->qs);
- const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
- const int8x16_t x1_0 = vld1q_s8(x1->qs);
- const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
-
- // load y
- const int8x16_t y0_0 = vld1q_s8(y0->qs);
- const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
- const int8x16_t y1_0 = vld1q_s8(y1->qs);
- const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
-
-#if defined(__ARM_FEATURE_DOTPROD)
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
- vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
- vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
-
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
- vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
- vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
+static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
+ ggml_float sumf = 0.0;
-#else
- const int16x8_t p0_0 = vmull_s8(vget_low_s8 (x0_0), vget_low_s8 (y0_0));
- const int16x8_t p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0));
- const int16x8_t p0_2 = vmull_s8(vget_low_s8 (x0_1), vget_low_s8 (y0_1));
- const int16x8_t p0_3 = vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1));
-
- const int16x8_t p1_0 = vmull_s8(vget_low_s8 (x1_0), vget_low_s8 (y1_0));
- const int16x8_t p1_1 = vmull_s8(vget_high_s8(x1_0), vget_high_s8(y1_0));
- const int16x8_t p1_2 = vmull_s8(vget_low_s8 (x1_1), vget_low_s8 (y1_1));
- const int16x8_t p1_3 = vmull_s8(vget_high_s8(x1_1), vget_high_s8(y1_1));
-
- const int32x4_t p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1));
- const int32x4_t p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3));
- const int32x4_t p2 = vaddq_s32(vpaddlq_s16(p1_0), vpaddlq_s16(p1_1));
- const int32x4_t p3 = vaddq_s32(vpaddlq_s16(p1_2), vpaddlq_s16(p1_3));
-
- sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), GGML_FP16_TO_FP32(x0->d)*GGML_FP16_TO_FP32(y0->d));
- sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(p2, p3)), GGML_FP16_TO_FP32(x1->d)*GGML_FP16_TO_FP32(y1->d));
-#endif
- }
+#if defined(GGML_SIMD)
+ const int np = (n & ~(GGML_F16_STEP - 1));
- *s = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
-#elif defined(__AVX2__) || defined(__AVX__)
- // Initialize accumulator with zeros
- __m256 acc = _mm256_setzero_ps();
+ GGML_F16_VEC sum[GGML_F16_ARR] = { GGML_F16_VEC_ZERO };
- // Main loop
- for (int i = 0; i < nb; ++i) {
- // Compute combined scale for the block
- const __m256 d = _mm256_set1_ps(GGML_FP16_TO_FP32(x[i].d) * GGML_FP16_TO_FP32(y[i].d));
- __m256i bx = _mm256_loadu_si256((const __m256i *)x[i].qs);
- __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);
+ GGML_F16_VEC ax[GGML_F16_ARR];
+ GGML_F16_VEC ay[GGML_F16_ARR];
- const __m256 q = mul_sum_i8_pairs_float(bx, by);
+ for (int i = 0; i < np; i += GGML_F16_STEP) {
+ for (int j = 0; j < GGML_F16_ARR; j++) {
+ ax[j] = GGML_F16_VEC_LOAD(x + i + j*GGML_F16_EPR, j);
+ ay[j] = GGML_F16_VEC_LOAD(y + i + j*GGML_F16_EPR, j);
- // Multiply q with scale and accumulate
-#if defined(__AVX2__)
- acc = _mm256_fmadd_ps( d, q, acc );
-#else
- acc = _mm256_add_ps( _mm256_mul_ps( d, q ), acc );
-#endif
+ sum[j] = GGML_F16_VEC_FMA(sum[j], ax[j], ay[j]);
+ }
}
- *s = hsum_float_8(acc);
-#elif defined(__riscv_v_intrinsic)
- float sumf = 0.0;
- size_t vl = __riscv_vsetvl_e8m1(qk);
-
- for (int i = 0; i < nb; i++) {
- // load elements
- vint8m1_t bx = __riscv_vle8_v_i8m1(x[i].qs, vl);
- vint8m1_t by = __riscv_vle8_v_i8m1(y[i].qs, vl);
-
- vint16m2_t vw_mul = __riscv_vwmul_vv_i16m2(bx, by, vl);
-
- vint32m1_t v_zero = __riscv_vmv_v_x_i32m1(0, vl);
- vint32m1_t v_sum = __riscv_vwredsum_vs_i16m2_i32m1(vw_mul, v_zero, vl);
-
- int sumi = __riscv_vmv_x_s_i32m1_i32(v_sum);
+ // reduce sum0..sum3 to sum0
+ GGML_F16_VEC_REDUCE(sumf, sum);
- sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
+ // leftovers
+ for (int i = np; i < n; ++i) {
+ sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
}
-
- *s = sumf;
#else
- // scalar
- float sumf = 0.0;
-
- for (int i = 0; i < nb; i++) {
- int sumi = 0;
-
- for (int j = 0; j < qk; j++) {
- sumi += x[i].qs[j]*y[i].qs[j];
- }
-
- sumf += sumi*(GGML_FP16_TO_FP32(x[i].d)*GGML_FP16_TO_FP32(y[i].d));
+ for (int i = 0; i < n; ++i) {
+ sumf += (ggml_float)(GGML_FP16_TO_FP32(x[i])*GGML_FP16_TO_FP32(y[i]));
}
+#endif
*s = sumf;
-#endif
}
// compute GGML_VEC_DOT_UNROLL dot products at once
#endif
}
+// xs and vs are byte strides of x and v
+inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int vs, float * restrict y, const float * restrict xv, const float * restrict vv) {
+
+ const float * restrict x[GGML_VEC_MAD_UNROLL];
+ const float * restrict v[GGML_VEC_MAD_UNROLL];
+
+ for (int i = 0; i < GGML_VEC_MAD_UNROLL; ++i) {
+ x[i] = (const float *) ((const char *) xv + i*xs);
+ v[i] = (const float *) ((const char *) vv + i*vs);
+ }
+
+#if defined(GGML_SIMD)
+ const int np = (n & ~(GGML_F32_STEP - 1));
+
+ GGML_F32_VEC vx[GGML_VEC_MAD_UNROLL];
+
+ for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
+ vx[k] = GGML_F32_VEC_SET1(v[k][0]);
+ }
+
+ GGML_F32_VEC ax[GGML_VEC_MAD_UNROLL][GGML_F32_ARR];
+ GGML_F32_VEC ay[GGML_F32_ARR];
+
+ for (int i = 0; i < np; i += GGML_F32_STEP) {
+ for (int j = 0; j < GGML_F32_ARR; j++) {
+ ay[j] = GGML_F32_VEC_LOAD(y + i + j*GGML_F32_EPR);
+
+ for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
+ ax[k][j] = GGML_F32_VEC_LOAD(x[k] + i + j*GGML_F32_EPR);
+ ay[j] = GGML_F32_VEC_FMA(ay[j], ax[k][j], vx[k]);
+ }
+
+ GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
+ }
+ }
+
+ // leftovers
+ for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
+ for (int i = np; i < n; ++i) {
+ y[i] += x[k][i]*v[k][0];
+ }
+ }
+#else
+ // scalar
+ for (int k = 0; k < GGML_VEC_MAD_UNROLL; ++k) {
+ for (int i = 0; i < n; ++i) {
+ y[i] += x[k][i]*v[k][0];
+ }
+ }
+#endif
+}
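// [editorial note] Reference semantics of ggml_vec_mad_f32_unroll above, in
// plain scalar C: y += sum over k of x_k * v_k[0], where row k of x starts
// xs *bytes* (not elements) past xv, and likewise for v. UNROLL stands in
// for GGML_VEC_MAD_UNROLL; the values are illustrative.
#include <stdio.h>

#define UNROLL 2

static void vec_mad_unroll_ref(int n, int xs, int vs,
                               float *y, const float *xv, const float *vv) {
    for (int k = 0; k < UNROLL; ++k) {
        const float *x = (const float *)((const char *)xv + k*xs);
        const float *v = (const float *)((const char *)vv + k*vs);
        for (int i = 0; i < n; ++i) {
            y[i] += x[i]*v[0];   // axpy with the scalar v_k[0]
        }
    }
}

int main(void) {
    float x[2][4] = {{1, 2, 3, 4}, {5, 6, 7, 8}};
    float v[2][1] = {{2}, {10}};
    float y[4]    = {0};
    vec_mad_unroll_ref(4, sizeof x[0], sizeof v[0], y, &x[0][0], &v[0][0]);
    printf("%g %g %g %g\n", y[0], y[1], y[2], y[3]); // 52 64 76 88
    return 0;
}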
+
//inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
#if defined(GGML_USE_ACCELERATE)
inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = tanhf(x[i]); }
inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
+inline static void ggml_vec_leaky_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.1f*x[i]; }
static const float GELU_COEF_A = 0.044715f;
static const float GELU_QUICK_COEF = -1.702f;
inline static void ggml_vec_gelu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
const uint16_t * i16 = (const uint16_t *) x;
for (int i = 0; i < n; ++i) {
- y[i] = table_gelu_f16[i16[i]];
+ y[i] = ggml_table_gelu_f16[i16[i]];
}
}
for (int i = 0; i < n; ++i) {
ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
memcpy(&t, &fp16, sizeof(uint16_t));
- y[i] = GGML_FP16_TO_FP32(table_gelu_f16[t]);
+ y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_f16[t]);
}
}
#else
//inline static void ggml_vec_gelu_quick_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
// const uint16_t * i16 = (const uint16_t *) x;
// for (int i = 0; i < n; ++i) {
-// y[i] = table_gelu_quick_f16[i16[i]];
+// y[i] = ggml_table_gelu_quick_f16[i16[i]];
// }
//}
for (int i = 0; i < n; ++i) {
ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
memcpy(&t, &fp16, sizeof(uint16_t));
- y[i] = GGML_FP16_TO_FP32(table_gelu_quick_f16[t]);
+ y[i] = GGML_FP16_TO_FP32(ggml_table_gelu_quick_f16[t]);
}
}
#else
//inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) {
// const uint16_t * i16 = (const uint16_t *) x;
// for (int i = 0; i < n; ++i) {
-// y[i] = table_silu_f16[i16[i]];
+// y[i] = ggml_table_silu_f16[i16[i]];
// }
//}
for (int i = 0; i < n; ++i) {
ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]);
memcpy(&t, &fp16, sizeof(uint16_t));
- y[i] = GGML_FP16_TO_FP32(table_silu_f16[t]);
+ y[i] = GGML_FP16_TO_FP32(ggml_table_silu_f16[t]);
}
}
#else
"ALIBI",
"CLAMP",
"CONV_1D",
+ "CONV_1D_STAGE_0",
+ "CONV_1D_STAGE_1",
+ "CONV_TRANSPOSE_1D",
"CONV_2D",
+ "CONV_2D_STAGE_0",
+ "CONV_2D_STAGE_1",
"CONV_TRANSPOSE_2D",
"POOL_1D",
"POOL_2D",
"CROSS_ENTROPY_LOSS_BACK",
};
-static_assert(GGML_OP_COUNT == 68, "GGML_OP_COUNT != 68");
+static_assert(GGML_OP_COUNT == 73, "GGML_OP_COUNT != 73");
static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
"none",
"alibi(x)",
"clamp(x)",
"conv_1d(x)",
+ "conv_1d_stage_0(x)",
+ "conv_1d_stage_1(x)",
+ "conv_transpose_1d(x)",
"conv_2d(x)",
+ "conv_2d_stage_0(x)",
+ "conv_2d_stage_1(x)",
"conv_transpose_2d(x)",
"pool_1d(x)",
"pool_2d(x)",
"cross_entropy_loss_back(x,y)",
};
-static_assert(GGML_OP_COUNT == 68, "GGML_OP_COUNT != 68");
+static_assert(GGML_OP_COUNT == 73, "GGML_OP_COUNT != 73");
static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
p[GGML_OP_DIAG_MASK_INF ] = true;
p[GGML_OP_DIAG_MASK_ZERO ] = true;
p[GGML_OP_CONV_1D ] = true;
+ p[GGML_OP_CONV_1D_STAGE_0 ] = true;
+ p[GGML_OP_CONV_1D_STAGE_1 ] = true;
+ p[GGML_OP_CONV_TRANSPOSE_1D ] = true;
p[GGML_OP_CONV_2D ] = true;
+ p[GGML_OP_CONV_2D_STAGE_0 ] = true;
+ p[GGML_OP_CONV_2D_STAGE_1 ] = true;
p[GGML_OP_CONV_TRANSPOSE_2D ] = true;
p[GGML_OP_FLASH_ATTN_BACK ] = true;
p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
static inline bool ggml_can_out_prod(const struct ggml_tensor * t0, const struct ggml_tensor * t1) {
static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
- return
- (t0->ne[1] == t1->ne[1]) &&
- (t0->ne[2] == t1->ne[2]) &&
- (t0->ne[3] == t1->ne[3]);
+ return (t0->ne[1] == t1->ne[1]) &&
+ (t1->ne[2]%t0->ne[2] == 0) && // verify t0 is broadcastable
+ (t1->ne[3]%t0->ne[3] == 0);
}
enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
for (int i = 0; i < (1 << 16); ++i) {
uint16_t ui = i;
memcpy(&ii, &ui, sizeof(ii));
- const float f = table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
- table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
- table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
- table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
- table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
+ const float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(ii);
+ ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f));
+ ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f));
+ ggml_table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f));
+ ggml_table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f));
}
const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
*result = (struct ggml_tensor) {
/*.type =*/ type,
/*.backend =*/ GGML_BACKEND_CPU,
+ /*.buffer =*/ NULL,
/*.n_dims =*/ n_dims,
/*.ne =*/ { 1, 1, 1, 1 },
/*.nb =*/ { 0, 0, 0, 0 },
return tensor;
}
+void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) {
+ const int64_t ne2 = tensor->ne[2];
+ const int64_t ne1 = tensor->ne[1];
+ const int64_t ne0 = tensor->ne[0];
+
+ const int64_t i3_ = (i/(ne2*ne1*ne0));
+ const int64_t i2_ = (i - i3_*ne2*ne1*ne0)/(ne1*ne0);
+ const int64_t i1_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0)/ne0;
+ const int64_t i0_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0 - i1_*ne0);
+
+ if (i0) {
+ * i0 = i0_;
+ }
+ if (i1) {
+ * i1 = i1_;
+ }
+ if (i2) {
+ * i2 = i2_;
+ }
+ if (i3) {
+ * i3 = i3_;
+ }
+}
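// [editorial note] A standalone round-trip check of the index arithmetic in
// ggml_unravel_index above, for a row-major tensor with extents ne0..ne3:
#include <assert.h>
#include <stdint.h>

int main(void) {
    const int64_t ne0 = 4, ne1 = 3, ne2 = 2;
    const int64_t i0 = 1, i1 = 2, i2 = 1, i3 = 0;
    const int64_t i  = i3*ne2*ne1*ne0 + i2*ne1*ne0 + i1*ne0 + i0;   // ravel
    // unravel, mirroring the divisions above
    const int64_t u3 = i/(ne2*ne1*ne0);
    const int64_t u2 = (i - u3*ne2*ne1*ne0)/(ne1*ne0);
    const int64_t u1 = (i - u3*ne2*ne1*ne0 - u2*ne1*ne0)/ne0;
    const int64_t u0 =  i - u3*ne2*ne1*ne0 - u2*ne1*ne0 - u1*ne0;
    assert(u0 == i0 && u1 == i1 && u2 == i2 && u3 == i3);
    return 0;
}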
+
int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
+ if (!ggml_is_contiguous(tensor)) {
+ int64_t id[4] = { 0, 0, 0, 0 };
+ ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
+ return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
+ }
switch (tensor->type) {
case GGML_TYPE_I8:
{
GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
return ((int8_t *)(tensor->data))[i];
- } break;
+ }
case GGML_TYPE_I16:
{
GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
return ((int16_t *)(tensor->data))[i];
- } break;
+ }
case GGML_TYPE_I32:
{
GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
return ((int32_t *)(tensor->data))[i];
- } break;
+ }
case GGML_TYPE_F16:
{
GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
- } break;
+ }
case GGML_TYPE_F32:
{
GGML_ASSERT(tensor->nb[0] == sizeof(float));
return ((float *)(tensor->data))[i];
- } break;
+ }
default:
{
GGML_ASSERT(false);
- } break;
+ }
}
return 0.0f;
}
void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
+ if (!ggml_is_contiguous(tensor)) {
+ int64_t id[4] = { 0, 0, 0, 0 };
+ ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
+ ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
+ return;
+ }
switch (tensor->type) {
case GGML_TYPE_I8:
{
}
}
+int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
+ void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
+ switch (tensor->type) {
+ case GGML_TYPE_I8:
+ return ((int8_t *) data)[0];
+ case GGML_TYPE_I16:
+ return ((int16_t *) data)[0];
+ case GGML_TYPE_I32:
+ return ((int32_t *) data)[0];
+ case GGML_TYPE_F16:
+ return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
+ case GGML_TYPE_F32:
+ return ((float *) data)[0];
+ default:
+ GGML_ASSERT(false);
+ }
+
+ return 0.0f;
+}
+
+void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
+ void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
+ switch (tensor->type) {
+ case GGML_TYPE_I8:
+ {
+ ((int8_t *)(data))[0] = value;
+ } break;
+ case GGML_TYPE_I16:
+ {
+ ((int16_t *)(data))[0] = value;
+ } break;
+ case GGML_TYPE_I32:
+ {
+ ((int32_t *)(data))[0] = value;
+ } break;
+ case GGML_TYPE_F16:
+ {
+ ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
+ } break;
+ case GGML_TYPE_F32:
+ {
+ ((float *)(data))[0] = value;
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
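// [editorial note] The *_nd accessors above address elements purely through
// the per-dimension byte strides nb[], which is what lets them work on
// non-contiguous views. The same arithmetic on a plain 2-D array:
#include <assert.h>
#include <stddef.h>

int main(void) {
    float buf[2][3];                        // "tensor" with ne0 = 3, ne1 = 2
    buf[1][2] = 7.0f;
    const size_t nb0 = sizeof(float);       // stride between i0 neighbours
    const size_t nb1 = 3*sizeof(float);     // stride between rows
    const char  *data = (const char *)buf;
    const float *p = (const float *)(data + 2*nb0 + 1*nb1);  // (i0 = 2, i1 = 1)
    assert(*p == 7.0f);
    return 0;
}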
+
float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
+ if (!ggml_is_contiguous(tensor)) {
+ int64_t id[4] = { 0, 0, 0, 0 };
+ ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
+ return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
+ }
switch (tensor->type) {
case GGML_TYPE_I8:
{
GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
return ((int8_t *)(tensor->data))[i];
- } break;
+ }
case GGML_TYPE_I16:
{
GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
return ((int16_t *)(tensor->data))[i];
- } break;
+ }
case GGML_TYPE_I32:
{
GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
return ((int32_t *)(tensor->data))[i];
- } break;
+ }
case GGML_TYPE_F16:
{
GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
return GGML_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
- } break;
+ }
case GGML_TYPE_F32:
{
GGML_ASSERT(tensor->nb[0] == sizeof(float));
return ((float *)(tensor->data))[i];
+ }
+ default:
+ {
+ GGML_ASSERT(false);
+ }
+ }
+
+ return 0.0f;
+}
+
+void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
+ if (!ggml_is_contiguous(tensor)) {
+ int64_t id[4] = { 0, 0, 0, 0 };
+ ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
+ ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
+ return;
+ }
+ switch (tensor->type) {
+ case GGML_TYPE_I8:
+ {
+ GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
+ ((int8_t *)(tensor->data))[i] = value;
+ } break;
+ case GGML_TYPE_I16:
+ {
+ GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
+ ((int16_t *)(tensor->data))[i] = value;
+ } break;
+ case GGML_TYPE_I32:
+ {
+ GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
+ ((int32_t *)(tensor->data))[i] = value;
+ } break;
+ case GGML_TYPE_F16:
+ {
+ GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
+ ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
+ } break;
+ case GGML_TYPE_F32:
+ {
+ GGML_ASSERT(tensor->nb[0] == sizeof(float));
+ ((float *)(tensor->data))[i] = value;
} break;
default:
{
GGML_ASSERT(false);
} break;
}
+}
+
+float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
+ void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
+ switch (tensor->type) {
+ case GGML_TYPE_I8:
+ return ((int8_t *) data)[0];
+ case GGML_TYPE_I16:
+ return ((int16_t *) data)[0];
+ case GGML_TYPE_I32:
+ return ((int32_t *) data)[0];
+ case GGML_TYPE_F16:
+ return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
+ case GGML_TYPE_F32:
+ return ((float *) data)[0];
+ default:
+ GGML_ASSERT(false);
+ }
return 0.0f;
}
-void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
+void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
+ void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
switch (tensor->type) {
case GGML_TYPE_I8:
{
- GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
- ((int8_t *)(tensor->data))[i] = value;
+ ((int8_t *)(data))[0] = value;
} break;
case GGML_TYPE_I16:
{
- GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
- ((int16_t *)(tensor->data))[i] = value;
+ ((int16_t *)(data))[0] = value;
} break;
case GGML_TYPE_I32:
{
- GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
- ((int32_t *)(tensor->data))[i] = value;
+ ((int32_t *)(data))[0] = value;
} break;
case GGML_TYPE_F16:
{
- GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
- ((ggml_fp16_t *)(tensor->data))[i] = GGML_FP32_TO_FP16(value);
+ ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
} break;
case GGML_TYPE_F32:
{
- GGML_ASSERT(tensor->nb[0] == sizeof(float));
- ((float *)(tensor->data))[i] = value;
+ ((float *)(data))[0] = value;
} break;
default:
{
return result;
}
+struct ggml_tensor * ggml_get_first_tensor(struct ggml_context * ctx) {
+ struct ggml_object * obj = ctx->objects_begin;
+
+ char * const mem_buffer = ctx->mem_buffer;
+
+ while (obj != NULL) {
+ if (obj->type == GGML_OBJECT_TENSOR) {
+ return (struct ggml_tensor *)(mem_buffer + obj->offs);
+ }
+
+ obj = obj->next;
+ }
+
+ return NULL;
+}
+
+struct ggml_tensor * ggml_get_next_tensor(struct ggml_context * ctx, struct ggml_tensor * tensor) {
+ struct ggml_object * obj = (struct ggml_object *) ((char *)tensor - GGML_OBJECT_SIZE);
+ obj = obj->next;
+
+ char * const mem_buffer = ctx->mem_buffer;
+
+ while (obj != NULL) {
+ if (obj->type == GGML_OBJECT_TENSOR) {
+ return (struct ggml_tensor *)(mem_buffer + obj->offs);
+ }
+
+ obj = obj->next;
+ }
+
+ return NULL;
+}
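// [editorial note] Typical use of the iteration API added above: walking all
// tensors allocated in a context. A usage sketch only; ctx is assumed to be
// a valid, initialized ggml context.
#include "ggml.h"
#include <stdio.h>

static void print_all_tensors(struct ggml_context * ctx) {
    for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL;
         t = ggml_get_next_tensor(ctx, t)) {
        printf("%s\n", t->name);
    }
}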
+
struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name) {
struct ggml_object * obj = ctx->objects_begin;
return ggml_add_impl(ctx, a, b, true);
}
+// ggml_add_cast
+
+static struct ggml_tensor * ggml_add_cast_impl(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ enum ggml_type type) {
+ // TODO: support less-strict constraint
+ // GGML_ASSERT(ggml_can_repeat(b, a));
+ GGML_ASSERT(ggml_can_repeat_rows(b, a));
+ GGML_ASSERT(ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16); // currently only supported for quantized input and f16
+
+ bool is_node = false;
+
+ if (a->grad || b->grad) {
+ // TODO: support backward pass for broadcasting
+ GGML_ASSERT(ggml_are_same_shape(a, b));
+ is_node = true;
+ }
+
+ struct ggml_tensor * result = ggml_new_tensor(ctx, type, a->n_dims, a->ne);
+
+ result->op = GGML_OP_ADD;
+ result->grad = is_node ? ggml_new_tensor(ctx, GGML_TYPE_F32, a->n_dims, a->ne) : NULL;
+ result->src[0] = a;
+ result->src[1] = b;
+
+ return result;
+}
+
+struct ggml_tensor * ggml_add_cast(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ enum ggml_type type) {
+ return ggml_add_cast_impl(ctx, a, b, type);
+}
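// [editorial note] ggml_add_cast above adds b to a quantized or f16 tensor a
// and materializes the sum in the requested type instead of a's type. A
// hedged usage sketch: keeping an f32 accumulator over f16 weights so
// repeated additions do not round through f16. Tensor names are illustrative.
#include "ggml.h"

static struct ggml_tensor * accum_f32(struct ggml_context * ctx,
                                      struct ggml_tensor * w_f16,
                                      struct ggml_tensor * g_f32) {
    return ggml_add_cast(ctx, w_f16, g_f32, GGML_TYPE_F32);
}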
+
// ggml_add1
static struct ggml_tensor * ggml_add1_impl(
return ggml_sqrt_impl(ctx, a, true);
}
-
// ggml_log
static struct ggml_tensor * ggml_log_impl(
return result;
}
-
// ggml_sum_rows
struct ggml_tensor * ggml_sum_rows(
result->op = GGML_OP_REPEAT;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
- result->src[1] = b;
return result;
}
result->op = GGML_OP_REPEAT_BACK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
- result->src[1] = b;
return result;
}
return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_RELU);
}
+// ggml_leaky
+
+struct ggml_tensor * ggml_leaky(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a) {
+ return ggml_unary(ctx, a, GGML_UNARY_OP_LEAKY);
+}
+
// ggml_gelu
struct ggml_tensor * ggml_gelu(
is_node = true;
}
- const int64_t ne[4] = { a->ne[0], b->ne[0], a->ne[2], b->ne[3] };
- struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MIN(a->n_dims, b->n_dims), ne);
+ // a is broadcastable to b for ne[2] and ne[3] -> use b->ne[2] and b->ne[3]
+ const int64_t ne[4] = { a->ne[0], b->ne[0], b->ne[2], b->ne[3] };
+ struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, MAX(a->n_dims, b->n_dims), ne);
result->op = GGML_OP_OUT_PROD;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
return ggml_set_impl(ctx, a, b, nb1, a->nb[2], a->nb[3], offset, false);
}
-
// ggml_cpy
static struct ggml_tensor * ggml_cpy_impl(
return ggml_cont_impl(ctx, a, true);
}
+// make contiguous, with new shape
+GGML_API struct ggml_tensor * ggml_cont_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0) {
+ return ggml_cont_4d(ctx, a, ne0, 1, 1, 1);
+}
+
+GGML_API struct ggml_tensor * ggml_cont_2d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0,
+ int64_t ne1) {
+ return ggml_cont_4d(ctx, a, ne0, ne1, 1, 1);
+}
+
+GGML_API struct ggml_tensor * ggml_cont_3d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0,
+ int64_t ne1,
+ int64_t ne2) {
+ return ggml_cont_4d(ctx, a, ne0, ne1, ne2, 1);
+}
+
+struct ggml_tensor * ggml_cont_4d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0,
+ int64_t ne1,
+ int64_t ne2,
+ int64_t ne3) {
+ GGML_ASSERT(ggml_nelements(a) == (ne0*ne1*ne2*ne3));
+
+ bool is_node = false;
+
+ struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, ne0, ne1, ne2, ne3);
+ ggml_format_name(result, "%s (cont)", a->name);
+
+ result->op = GGML_OP_CONT;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src[0] = a;
+
+ return result;
+}
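// [editorial note] Usage sketch for the new ggml_cont_* helpers above:
// flattening a permuted (hence non-contiguous) view into a contiguous 2-D
// tensor in one call. The permutation axes are illustrative.
#include "ggml.h"

static struct ggml_tensor * flatten_permuted(struct ggml_context * ctx,
                                             struct ggml_tensor * x) {
    struct ggml_tensor * p = ggml_permute(ctx, x, 0, 2, 1, 3);   // non-contiguous view
    return ggml_cont_2d(ctx, p, p->ne[0]*p->ne[1], p->ne[2]*p->ne[3]);
}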
+
// ggml_reshape
struct ggml_tensor * ggml_reshape(
struct ggml_tensor * a,
struct ggml_tensor * b) {
GGML_ASSERT(ggml_is_contiguous(a));
- GGML_ASSERT(ggml_is_contiguous(b));
+    // as only the shape of b is relevant, and not its memory layout, b is allowed to be non-contiguous.
GGML_ASSERT(ggml_nelements(a) == ggml_nelements(b));
bool is_node = false;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
result->src[1] = b;
- result->src[2] = c;
return result;
}
return result;
}
-
// ggml_diag_mask_inf
static struct ggml_tensor * ggml_diag_mask_inf_impl(
return ggml_soft_max_impl(ctx, a, true);
}
-
// ggml_soft_max_back
static struct ggml_tensor * ggml_soft_max_back_impl(
static struct ggml_tensor * ggml_rope_impl(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
int mode,
int n_ctx,
+ int n_orig_ctx,
float freq_base,
float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow,
float xpos_base,
bool xpos_down,
bool inplace) {
- GGML_ASSERT(n_past >= 0);
+ GGML_ASSERT(ggml_is_vector(b));
+ GGML_ASSERT(b->type == GGML_TYPE_I32);
+ GGML_ASSERT(a->ne[2] == b->ne[0]);
+
bool is_node = false;
if (a->grad) {
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
- int32_t params[8] = { n_past, n_dims, mode, n_ctx };
- memcpy(params + 4, &freq_base, sizeof(float));
- memcpy(params + 5, &freq_scale, sizeof(float));
- memcpy(params + 6, &xpos_base, sizeof(float));
- memcpy(params + 7, &xpos_down, sizeof(bool));
+ int32_t params[13] = { /*n_past*/ 0, n_dims, mode, n_ctx, n_orig_ctx };
+ memcpy(params + 5, &freq_base, sizeof(float));
+ memcpy(params + 6, &freq_scale, sizeof(float));
+ memcpy(params + 7, &ext_factor, sizeof(float));
+ memcpy(params + 8, &attn_factor, sizeof(float));
+ memcpy(params + 9, &beta_fast, sizeof(float));
+ memcpy(params + 10, &beta_slow, sizeof(float));
+ memcpy(params + 11, &xpos_base, sizeof(float));
+ memcpy(params + 12, &xpos_down, sizeof(bool));
ggml_set_op_params(result, params, sizeof(params));
result->op = GGML_OP_ROPE;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
+ result->src[1] = b;
return result;
}
struct ggml_tensor * ggml_rope(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
int mode,
int n_ctx) {
- return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, false);
+ return ggml_rope_impl(
+ ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, false
+ );
}
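// [editorial note] With the signature change above, rope takes an I32 vector
// of per-token positions (one entry per row along ne[2]) instead of a single
// n_past. A sketch of reproducing the old n_past behaviour; it assumes a
// context whose tensor data is actually allocated (not a no-alloc context):
#include "ggml.h"

static struct ggml_tensor * rope_with_n_past(struct ggml_context * ctx,
                                             struct ggml_tensor * a,
                                             int n_past, int n_dims,
                                             int mode, int n_ctx) {
    struct ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, a->ne[2]);
    for (int i = 0; i < (int) a->ne[2]; ++i) {
        ((int32_t *) pos->data)[i] = n_past + i;   // positions n_past, n_past+1, ...
    }
    return ggml_rope(ctx, a, pos, n_dims, mode, n_ctx);
}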
struct ggml_tensor * ggml_rope_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
int mode,
int n_ctx) {
- return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, 10000.0f, 1.0f, 0.0f, false, true);
+ return ggml_rope_impl(
+ ctx, a, b, n_dims, mode, n_ctx, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, false, true
+ );
}
struct ggml_tensor * ggml_rope_custom(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
int mode,
int n_ctx,
+ int n_orig_ctx,
float freq_base,
- float freq_scale) {
- return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, false);
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow) {
+ return ggml_rope_impl(
+ ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, false
+ );
}
struct ggml_tensor * ggml_rope_custom_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
int mode,
int n_ctx,
+ int n_orig_ctx,
float freq_base,
- float freq_scale) {
- return ggml_rope_impl(ctx, a, n_past, n_dims, mode, n_ctx, freq_base, freq_scale, 0.0f, false, true);
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow) {
+ return ggml_rope_impl(
+ ctx, a, b, n_dims, mode, n_ctx, n_orig_ctx, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow, 0.0f, false, true
+ );
}
struct ggml_tensor * ggml_rope_xpos_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
float base,
bool down) {
- return ggml_rope_impl(ctx, a, n_past, n_dims, 0, 0, 10000.0f, 1.0f, base, down, true);
+ return ggml_rope_impl(ctx, a, b, n_dims, 0, 0, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, base, down, true);
}
// ggml_rope_back
struct ggml_tensor * ggml_rope_back(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
int mode,
int n_ctx,
float freq_scale,
float xpos_base,
bool xpos_down) {
- GGML_ASSERT(n_past >= 0);
+ GGML_ASSERT(ggml_is_vector(b));
+ GGML_ASSERT(b->type == GGML_TYPE_I32);
+ GGML_ASSERT(a->ne[2] == b->ne[0]);
+
GGML_ASSERT((mode & 4) == 0 && "ggml_rope_back() for ChatGLM not implemented yet");
bool is_node = false;
struct ggml_tensor * result = ggml_dup_tensor(ctx, a);
- int32_t params[8] = { n_past, n_dims, mode, n_ctx };
+ int32_t params[8] = { /*n_past*/ 0, n_dims, mode, n_ctx };
memcpy(params + 4, &freq_base, sizeof(float));
memcpy(params + 5, &freq_scale, sizeof(float));
memcpy(params + 6, &xpos_base, sizeof(float));
result->op = GGML_OP_ROPE_BACK;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
+ result->src[1] = b;
return result;
}
return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
}
-GGML_API struct ggml_tensor * ggml_conv_1d(
- struct ggml_context * ctx,
- struct ggml_tensor * a,
- struct ggml_tensor * b,
- int s0,
- int p0,
- int d0) {
- GGML_ASSERT(ggml_is_matrix(b));
+// im2col: [N, IC, IL] => [N, OL, IC*K]
+// a: [OC,IC, K]
+// b: [N, IC, IL]
+// result: [N, OL, IC*K]
+static struct ggml_tensor * ggml_conv_1d_stage_0(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int s0,
+ int p0,
+ int d0) {
GGML_ASSERT(a->ne[1] == b->ne[1]);
bool is_node = false;
is_node = true;
}
+ const int64_t OL = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0);
+
const int64_t ne[4] = {
- ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
- a->ne[2], 1, 1,
+ a->ne[1] * a->ne[0],
+ OL,
+ b->ne[2],
+ 1,
};
- struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
+ struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne);
int32_t params[] = { s0, p0, d0 };
ggml_set_op_params(result, params, sizeof(params));
- result->op = GGML_OP_CONV_1D;
+ result->op = GGML_OP_CONV_1D_STAGE_0;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src[0] = a;
+ result->src[1] = b;
+
+ return result;
+}
+
+// ggml_conv_1d_stage_1
+
+// gemm: [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K]
+// a: [OC, IC, K]
+// b: [N, OL, IC * K]
+// result: [N, OC, OL]
+static struct ggml_tensor * ggml_conv_1d_stage_1(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b) {
+
+ bool is_node = false;
+
+ if (a->grad || b->grad) {
+ GGML_ASSERT(false); // TODO: implement backward
+ is_node = true;
+ }
+
+ const int64_t ne[4] = {
+ b->ne[1],
+ a->ne[2],
+ b->ne[2],
+ 1,
+ };
+ struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
+
+ result->op = GGML_OP_CONV_1D_STAGE_1;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
result->src[1] = b;
return result;
}
+// ggml_conv_1d
+
+GGML_API struct ggml_tensor * ggml_conv_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int s0,
+ int p0,
+ int d0) {
+ struct ggml_tensor * result = ggml_conv_1d_stage_0(ctx, a, b, s0, p0, d0);
+ result = ggml_conv_1d_stage_1(ctx, a, result);
+ return result;
+}
+
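// [editorial note] The rewrite above decomposes conv_1d into im2col (stage_0)
// followed by a GEMM (stage_1). A scalar sketch of the im2col step for one
// batch, using f32 where the op actually produces f16:

// src: [IC][IL] -> dst: [OL][IC*K]; out-of-range taps are zero-padded.
static void im2col_1d(int IC, int IL, int K, int OL,
                      int s0, int p0, int d0,
                      const float *src, float *dst) {
    for (int ol = 0; ol < OL; ++ol) {
        for (int ic = 0; ic < IC; ++ic) {
            for (int k = 0; k < K; ++k) {
                const int il = ol*s0 + k*d0 - p0;
                dst[ol*IC*K + ic*K + k] =
                    (il >= 0 && il < IL) ? src[ic*IL + il] : 0.0f;
            }
        }
    }
}
// After this, the convolution reduces to one matrix product of the [OC, IC*K]
// kernel against the [OL, IC*K] rows, which is what stage_1 computes.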
+// GGML_API struct ggml_tensor * ggml_conv_1d(
+// struct ggml_context * ctx,
+// struct ggml_tensor * a,
+// struct ggml_tensor * b,
+// int s0,
+// int p0,
+// int d0) {
+// GGML_ASSERT(ggml_is_matrix(b));
+// GGML_ASSERT(a->ne[1] == b->ne[1]);
+// bool is_node = false;
+
+// if (a->grad || b->grad) {
+// GGML_ASSERT(false); // TODO: implement backward
+// is_node = true;
+// }
+
+// const int64_t ne[4] = {
+// ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
+// a->ne[2], 1, 1,
+// };
+// struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
+
+// int32_t params[] = { s0, p0, d0 };
+// ggml_set_op_params(result, params, sizeof(params));
+
+// result->op = GGML_OP_CONV_1D;
+// result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+// result->src[0] = a;
+// result->src[1] = b;
+
+// return result;
+// }
+
// ggml_conv_1d_ph
struct ggml_tensor* ggml_conv_1d_ph(
return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
}
+// ggml_conv_transpose_1d
+
+static int64_t ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
+ return (ins - 1) * s - 2 * p + d * (ks - 1) + 1;
+}
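// [editorial note] Quick standalone check of the transposed-convolution
// output-size formula above:
#include <assert.h>
#include <stdint.h>

static int64_t out_size(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins - 1) * s - 2 * p + d * (ks - 1) + 1;
}

int main(void) {
    // 4 inputs, kernel 3, stride 2, no padding/dilation:
    // (4-1)*2 + (3-1) + 1 = 9 outputs
    assert(out_size(4, 3, 2, 0, 1) == 9);
    return 0;
}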
+
+GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int s0,
+ int p0,
+ int d0) {
+ GGML_ASSERT(ggml_is_matrix(b));
+ GGML_ASSERT(a->ne[2] == b->ne[1]);
+ GGML_ASSERT(a->ne[3] == 1);
+
+ GGML_ASSERT(p0 == 0);
+ GGML_ASSERT(d0 == 1);
+
+ bool is_node = false;
+
+ if (a->grad || b->grad) {
+ GGML_ASSERT(false); // TODO: implement backward
+ is_node = true;
+ }
+
+ const int64_t ne[4] = {
+ ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/),
+ a->ne[1], b->ne[2], 1,
+ };
+ struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
+
+ int32_t params[] = { s0, p0, d0 };
+ ggml_set_op_params(result, params, sizeof(params));
+
+ result->op = GGML_OP_CONV_TRANSPOSE_1D;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src[0] = a;
+ result->src[1] = b;
+
+ return result;
+}
+
// ggml_conv_2d
-struct ggml_tensor * ggml_conv_2d(
+// im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
+// a: [OC,IC, KH, KW]
+// b: [N, IC, IH, IW]
+// result: [N, OH, OW, IC*KH*KW]
+static struct ggml_tensor * ggml_conv_2d_stage_0(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * b,
int d0,
int d1) {
- GGML_ASSERT(a->ne[2] == b->ne[2]);
+ GGML_ASSERT(a->ne[2] == b->ne[2]);
+ bool is_node = false;
+
+ if (a->grad || b->grad) {
+ GGML_ASSERT(false); // TODO: implement backward
+ is_node = true;
+ }
+
+ const int64_t OH = ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1);
+ const int64_t OW = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0);
+
+ const int64_t ne[4] = {
+ a->ne[2] * a->ne[1] * a->ne[0],
+ OW,
+ OH,
+ b->ne[3],
+ };
+ struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne);
+
+ int32_t params[] = { s0, s1, p0, p1, d0, d1 };
+ ggml_set_op_params(result, params, sizeof(params));
+
+ result->op = GGML_OP_CONV_2D_STAGE_0;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src[0] = a;
+ result->src[1] = b;
+
+ return result;
+
+}
+
+// gemm: [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW]
+// a: [OC, IC, KH, KW]
+// b: [N, OH, OW, IC * KH * KW]
+// result: [N, OC, OH, OW]
+static struct ggml_tensor * ggml_conv_2d_stage_1(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b) {
+
bool is_node = false;
if (a->grad || b->grad) {
}
const int64_t ne[4] = {
- ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
- ggml_calc_conv_output_size(b->ne[1], a->ne[1], s1, p1, d1),
- a->ne[3], b->ne[3],
+ b->ne[1],
+ b->ne[2],
+ a->ne[3],
+ b->ne[3],
};
struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
- int32_t params[] = { s0, s1, p0, p1, d0, d1 };
- ggml_set_op_params(result, params, sizeof(params));
-
- result->op = GGML_OP_CONV_2D;
+ result->op = GGML_OP_CONV_2D_STAGE_1;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
result->src[1] = b;
}
-// ggml_conv_2d_sk_p0
+// a: [OC,IC, KH, KW]
+// b: [N, IC, IH, IW]
+// result: [N, OC, OH, OW]
+struct ggml_tensor * ggml_conv_2d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int s0,
+ int s1,
+ int p0,
+ int p1,
+ int d0,
+ int d1) {
+
+ struct ggml_tensor * result = ggml_conv_2d_stage_0(ctx, a, b, s0, s1, p0, p1, d0, d1); // [N, OH, OW, IC * KH * KW]
+ result = ggml_conv_2d_stage_1(ctx, a, result);
+
+ return result;
+
+}
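// [editorial note] Shape bookkeeping for the two-stage conv_2d above, checked
// on a small example (the helper matches ggml_calc_conv_output_size):
#include <assert.h>
#include <stdint.h>

static int64_t conv_out(int64_t ins, int64_t ks, int s, int p, int d) {
    return (ins + 2*p - d*(ks - 1) - 1)/s + 1;
}

int main(void) {
    // a: [OC=8, IC=3, KH=3, KW=3], b: [N=1, IC=3, IH=32, IW=32], stride 1, pad 1
    const int64_t OH = conv_out(32, 3, 1, 1, 1);
    const int64_t OW = conv_out(32, 3, 1, 1, 1);
    assert(OH == 32 && OW == 32);
    // stage 0 (im2col): [N, OH, OW, IC*KH*KW] = [1, 32, 32, 27]
    // stage 1 (gemm)  : [N, OC, OH, OW]       = [1,  8, 32, 32]
    return 0;
}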
+// ggml_conv_2d_sk_p0
struct ggml_tensor * ggml_conv_2d_sk_p0(
struct ggml_context * ctx,
struct ggml_tensor * a,
// ggml_pool_*
-static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, int p) {
+static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, float p) {
return (ins + 2 * p - ks) / s + 1;
}
int k1,
int s0,
int s1,
- int p0,
- int p1) {
+ float p0,
+ float p1) {
bool is_node = false;
// d shape [D,N,ne2,ne3]
// q shape [D,N,ne2,ne3]
- // k shape [D,M,ne2,ne3]
- // v shape [M,D,ne2,ne3]
+ // k shape [D,M,kvne2,ne3]
+ // v shape [M,D,kvne2,ne3]
- const int64_t D = q->ne[0];
- const int64_t N = q->ne[1];
- const int64_t M = k->ne[1];
- const int64_t ne2 = q->ne[2];
- const int64_t ne3 = q->ne[3];
+ const int64_t D = q->ne[0];
+ const int64_t N = q->ne[1];
+ const int64_t M = k->ne[1];
+ const int64_t ne2 = q->ne[2];
+ const int64_t ne3 = q->ne[3];
+ const int64_t kvne2 = k->ne[2];
GGML_ASSERT(k->ne[0] == D);
GGML_ASSERT(v->ne[0] == M);
GGML_ASSERT(v->ne[1] == D);
GGML_ASSERT(d->ne[0] == D);
GGML_ASSERT(d->ne[1] == N);
- GGML_ASSERT(k->ne[2] == ne2);
+ GGML_ASSERT(k->ne[2] == kvne2);
GGML_ASSERT(k->ne[3] == ne3);
- GGML_ASSERT(v->ne[2] == ne2);
+ GGML_ASSERT(v->ne[2] == kvne2);
GGML_ASSERT(v->ne[3] == ne3);
GGML_ASSERT(d->ne[2] == ne2);
GGML_ASSERT(d->ne[3] == ne3);
+ GGML_ASSERT(ne2 % kvne2 == 0);
+
bool is_node = false;
if (q->grad || k->grad || v->grad) {
}
    // store gradients of q, k and v as contiguous tensors concatenated in result.
- // q shape[D,N,ne2,ne3] ; k shape [D,M,ne2,ne3] ; v shape [M,D,ne2,ne3]
- // gradq->data = result->data
- // gradk->data = result->data + nb0*D*N*ne2*ne3
- // gradv->data = result->data + nb0*D*N*ne2*ne3 + nb0*D*M*ne2*ne3
// note: v and gradv are actually transposed, i.e. v->ne[0] != D.
- int64_t ne[4] = {D,M+N+M,ne2,ne3};
+ const int64_t elem_q = ggml_nelements(q);
+ const int64_t elem_k = ggml_nelements(k);
+ const int64_t elem_v = ggml_nelements(v);
- struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
+ enum ggml_type result_type = GGML_TYPE_F32;
+ GGML_ASSERT(ggml_blck_size(result_type) == 1);
+ const size_t tsize = ggml_type_size(result_type);
+
+ const size_t offs_q = 0;
+ const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
+ const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
+ const size_t end = offs_v + GGML_PAD(elem_v * tsize, GGML_MEM_ALIGN);
+
+ const size_t nelements = (end + tsize - 1)/tsize;
+
+ struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nelements);
int32_t masked_i = masked ? 1 : 0;
ggml_set_op_params(result, &masked_i, sizeof(masked_i));
return result;
}
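// [editorial note] The offs_q/offs_k/offs_v computation above packs the three
// gradients into one 1-D result tensor at GGML_MEM_ALIGN-aligned offsets. The
// padding arithmetic, checked standalone (ALIGN/PAD stand in for
// GGML_MEM_ALIGN/GGML_PAD):
#include <assert.h>
#include <stddef.h>

#define ALIGN 16
#define PAD(x, n) (((x) + (n) - 1) & ~((size_t)(n) - 1))

int main(void) {
    const size_t elem_q = 100, elem_k = 50, elem_v = 60, tsize = 4; // f32
    const size_t offs_q = 0;
    const size_t offs_k = offs_q + PAD(elem_q*tsize, ALIGN);        // 400
    const size_t offs_v = offs_k + PAD(elem_k*tsize, ALIGN);        // 400 + 208
    const size_t end    = offs_v + PAD(elem_v*tsize, ALIGN);        // 608 + 240
    assert(offs_k == 400 && offs_v == 608 && end == 848);
    return 0;
}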
-
struct ggml_tensor * ggml_add_rel_pos(
struct ggml_context * ctx,
struct ggml_tensor * a,
return ggml_map_custom3_impl(ctx, a, b, c, fun, n_tasks, userdata, true);
}
-
-
// ggml_cross_entropy_loss
struct ggml_tensor * ggml_cross_entropy_loss(
GGML_ASSERT(tensor->grad == NULL);
tensor->grad = ggml_dup_tensor(ctx, tensor);
+ ggml_format_name(tensor->grad, "%s (grad)", tensor->name);
}
// ggml_compute_forward_dup
return;
}
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
const int ith = params->ith; // thread index
const int nth = params->nth; // number of threads
return;
}
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
const int ith = params->ith; // thread index
const int nth = params->nth; // number of threads
const int nr = ggml_nrows(src0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
GGML_ASSERT( nb0 == sizeof(float));
GGML_ASSERT(nb00 == sizeof(float));
#else
ggml_vec_add_f32(ne00, dst_ptr, src0_ptr, src1_ptr);
#endif
- // }
- // }
}
} else {
// src1 is not contiguous
const int nr = ggml_nrows(src0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
GGML_ASSERT(src0->type == GGML_TYPE_F16);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F16);
- GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
+ if (dst->type == GGML_TYPE_F32) {
+ GGML_ASSERT( nb0 == sizeof(float));
+ }
+ else {
+ GGML_ASSERT(dst->type == GGML_TYPE_F16);
+ GGML_ASSERT( nb0 == sizeof(ggml_fp16_t));
+ }
+
GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
// rows per thread
const int ir1 = MIN(ir0 + dr, nr);
if (nb10 == sizeof(float)) {
- for (int ir = ir0; ir < ir1; ++ir) {
- // src0, src1 and dst are same shape => same indices
- const int i3 = ir/(ne2*ne1);
- const int i2 = (ir - i3*ne2*ne1)/ne1;
- const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
-
- ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
- ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
- float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
-
- for (int i = 0; i < ne0; i++) {
- dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
+ if (dst->type == GGML_TYPE_F16) {
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
+ ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
+
+ for (int i = 0; i < ne0; i++) {
+ dst_ptr[i] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i]);
+ }
+ }
+ } else {
+ for (int ir = ir0; ir < ir1; ++ir) {
+ // src0, src1 and dst are same shape => same indices
+ const int i3 = ir/(ne2*ne1);
+ const int i2 = (ir - i3*ne2*ne1)/ne1;
+ const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ float * dst_ptr = (float *) ((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1);
+ ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01);
+ float * src1_ptr = (float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11);
+
+ for (int i = 0; i < ne0; i++) {
+ dst_ptr[i] = GGML_FP16_TO_FP32(src0_ptr[i]) + src1_ptr[i];
+ }
}
}
}
const int nr = ggml_nrows(src0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
GGML_ASSERT(src0->type == GGML_TYPE_F16);
GGML_ASSERT(src1->type == GGML_TYPE_F16);
const int nr = ggml_nrows(src0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
const int ith = params->ith;
const int nth = params->nth;
const enum ggml_type type = src0->type;
+ const enum ggml_type dtype = dst->type;
ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
- ggml_from_float_t const quantize_row_q = type_traits[type].from_float;
+ ggml_from_float_t const quantize_row_q = type_traits[dtype].from_float;
// we don't support permuted src0 or src1
GGML_ASSERT(nb00 == ggml_type_size(type));
GGML_ASSERT(nb2 <= nb3);
GGML_ASSERT(ggml_is_quantized(src0->type));
- GGML_ASSERT(dst->type == src0->type);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
// rows per thread
// add src1
ggml_vec_acc_f32(ne00, wdata, src1_row);
// quantize row to dst
- quantize_row_q(wdata, dst_row, ne00);
+ if (quantize_row_q != NULL) {
+ quantize_row_q(wdata, dst_row, ne00);
+ } else {
+ memcpy(dst_row, wdata, ne0*nb0);
+ }
}
}
const int nr = ggml_nrows(src0);
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
GGML_ASSERT( nb0 == sizeof(float));
GGML_ASSERT(nb00 == sizeof(float));
const int nr = ggml_nrows(src0);
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
GGML_ASSERT(src0->type == GGML_TYPE_F16);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
const int nr = ggml_nrows(src0);
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
GGML_ASSERT(src0->type == GGML_TYPE_F16);
GGML_ASSERT(src1->type == GGML_TYPE_F16);
const int nr = ggml_nrows(src0);
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
const enum ggml_type type = src0->type;
ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
}
}
-
// ggml_compute_forward_acc
static void ggml_compute_forward_acc_f32(
const int nr = ggml_nrows(src1);
const int nc = src1->ne[0];
- GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
- GGML_TENSOR_LOCALS(size_t, nb1, src1, nb);
+ GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
+ GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
// src0 and dst as viewed during acc
const size_t nb0 = ggml_element_size(src0);
const int nr = ggml_nrows(src0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
GGML_ASSERT( nb0 == sizeof(float));
GGML_ASSERT(nb00 == sizeof(float));
const int i2 = (ir - i3*ne2*ne1)/ne1;
const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
-
#ifdef GGML_USE_ACCELERATE
vDSP_vsub(
(float *) ((char *) src1->data + i3*nb13 + i2*nb12 + i1*nb11), 1,
const int64_t nr = ggml_nrows(src0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
GGML_ASSERT( nb0 == sizeof(float));
GGML_ASSERT(nb00 == sizeof(float));
const int nr = ggml_nrows(src0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
GGML_ASSERT( nb0 == sizeof(float));
GGML_ASSERT(nb00 == sizeof(float));
const int i2 = (ir - i3*ne2*ne1)/ne1;
const int i1 = (ir - i3*ne2*ne1 - i2*ne1);
-
#ifdef GGML_USE_ACCELERATE
UNUSED(ggml_vec_div_f32);
}
}
-
// ggml_compute_forward_log
static void ggml_compute_forward_log_f32(
assert(ggml_is_scalar(dst));
assert(src0->nb[0] == sizeof(float));
- GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
- GGML_TENSOR_LOCALS(size_t, nb0, src0, nb);
+ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
+ GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
ggml_float sum = 0;
ggml_float row_sum = 0;
assert(src0->nb[0] == sizeof(ggml_fp16_t));
- GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
- GGML_TENSOR_LOCALS(size_t, nb0, src0, nb);
+ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
+ GGML_TENSOR_LOCALS(size_t, nb0, src0, nb)
float sum = 0;
float row_sum = 0;
GGML_ASSERT(src0->nb[0] == sizeof(float));
GGML_ASSERT(dst->nb[0] == sizeof(float));
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
GGML_ASSERT(ne0 == 1);
GGML_ASSERT(ne1 == ne01);
assert(src0->nb[0] == sizeof(float));
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
assert(ne0 == 1);
assert(ne1 == ne01);
return;
}
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
// guaranteed to be an integer due to the check in ggml_can_repeat
const int nr0 = (int)(ne0/ne00);
}
}
+static void ggml_compute_forward_repeat_f16(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(params->ith == 0);
+ GGML_ASSERT(ggml_can_repeat(src0, dst));
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ GGML_TENSOR_UNARY_OP_LOCALS
+
+ // guaranteed to be an integer due to the check in ggml_can_repeat
+ const int nr0 = (int)(ne0/ne00);
+ const int nr1 = (int)(ne1/ne01);
+ const int nr2 = (int)(ne2/ne02);
+ const int nr3 = (int)(ne3/ne03);
+
+ // TODO: support for transposed / permuted tensors
+ GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+
+ // TODO: maybe this is not optimal?
+ for (int i3 = 0; i3 < nr3; i3++) {
+ for (int k3 = 0; k3 < ne03; k3++) {
+ for (int i2 = 0; i2 < nr2; i2++) {
+ for (int k2 = 0; k2 < ne02; k2++) {
+ for (int i1 = 0; i1 < nr1; i1++) {
+ for (int k1 = 0; k1 < ne01; k1++) {
+ for (int i0 = 0; i0 < nr0; i0++) {
+ ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3*ne03 + k3)*nb3 + (i2*ne02 + k2)*nb2 + (i1*ne01 + k1)*nb1 + (i0*ne00)*nb0);
+ ggml_fp16_t * x = (ggml_fp16_t *) ((char *) src0->data + ( k3)*nb03 + ( k2)*nb02 + ( k1)*nb01);
+ // ggml_vec_cpy_f16(ne00, y, x)
+ for (int i = 0; i < ne00; ++i) {
+ y[i] = x[i];
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
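// A minimal illustrative sketch, not from the patch itself: the broadcast
// semantics shared by both repeat kernels. ggml_can_repeat() guarantees each
// dst dimension is an integer multiple of the matching src0 dimension, so the
// kernel only tiles the source. A 1-D version over plain float arrays:

static void repeat_1d_sketch(int ne00, int nr0, const float * src, float * dst) {
    // dst holds ne0 = nr0*ne00 elements: nr0 back-to-back copies of src
    for (int i0 = 0; i0 < nr0; i0++) {
        for (int i = 0; i < ne00; ++i) {
            dst[i0*ne00 + i] = src[i];
        }
    }
}

// e.g. src = {1, 2}, nr0 = 3  =>  dst = {1, 2, 1, 2, 1, 2}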
static void ggml_compute_forward_repeat(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
struct ggml_tensor * dst) {
switch (src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_repeat_f16(params, src0, dst);
+ } break;
case GGML_TYPE_F32:
{
ggml_compute_forward_repeat_f32(params, src0, dst);
return;
}
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
// guaranteed to be an integer due to the check in ggml_can_repeat
const int nr0 = (int)(ne00/ne0);
const int ith = params->ith;
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
// TODO: support for transposed / permuted tensors
GGML_ASSERT(nb0 == sizeof(float));
#ifndef NDEBUG
for (int k = 0; k < nc; k++) {
- const float x = ((float *) ((char *) dst->data + i1*( dst->nb[1])))[k];
+ const float x = ((float *) ((char *) dst->data + i1*(dst->nb[1])))[k];
UNUSED(x);
assert(!isnan(x));
assert(!isinf(x));
}
}
+// ggml_compute_forward_leaky
+
+static void ggml_compute_forward_leaky_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ assert(params->ith == 0);
+ assert(ggml_are_same_shape(src0, dst));
+
+ if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ const int n = ggml_nrows(src0);
+ const int nc = src0->ne[0];
+
+ assert(dst->nb[0] == sizeof(float));
+ assert(src0->nb[0] == sizeof(float));
+
+ for (int i = 0; i < n; i++) {
+ ggml_vec_leaky_f32(nc,
+ (float *) ((char *) dst->data + i*( dst->nb[1])),
+ (float *) ((char *) src0->data + i*(src0->nb[1])));
+ }
+}
+
+static void ggml_compute_forward_leaky(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_leaky_f32(params, src0, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
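// Illustrative sketch, not from the patch: ggml_vec_leaky_f32 is assumed here
// to apply a leaky ReLU with a hard-coded 0.1f negative slope, matching ggml's
// other vector helpers at this revision; only the constant would change if the
// slope differs.

static void vec_leaky_f32_sketch(const int n, float * y, const float * x) {
    for (int i = 0; i < n; ++i) {
        y[i] = x[i] > 0.0f ? x[i] : 0.1f*x[i]; // negatives are scaled, not clamped to zero
    }
}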
// ggml_compute_forward_silu_back
static void ggml_compute_forward_silu_back_f32(
const int ith = params->ith;
const int nth = params->nth;
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
float eps;
memcpy(&eps, dst->op_params, sizeof(float));
const int ith = params->ith;
const int nth = params->nth;
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
float eps;
memcpy(&eps, dst->op_params, sizeof(float));
const int ith = params->ith;
const int nth = params->nth;
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
float eps;
memcpy(&eps, dst->op_params, sizeof(float));
const int ith = params->ith;
const int nth = params->nth;
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
const float eps = 1e-6f; // TODO: make this a parameter
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
const int ith = params->ith;
const int nth = params->nth;
#if defined(GGML_USE_CLBLAST)
if (ggml_cl_can_mul_mat(src0, src1, dst)) {
- // TODO: handle case when src0 is broadcast-able into src1 across 2nd,3rd dimension
- // ref: https://github.com/ggerganov/ggml/pull/224
- GGML_ASSERT(ne02 == ne12);
- GGML_ASSERT(ne03 == ne13);
-
if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
}
for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) {
vec_dot(ne00, &tmp[ir0 - iir0], src0_row + ir0*nb01, src1_col);
}
- memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
+ memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float));
+ }
+ }
+ }
+}
+
+// ggml_compute_forward_out_prod
+
+static void ggml_compute_forward_out_prod_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ // int64_t t0 = ggml_perf_time_us();
+ // UNUSED(t0);
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ GGML_ASSERT(ne02 == ne12);
+ GGML_ASSERT(ne03 == ne13);
+ GGML_ASSERT(ne2 == ne12);
+ GGML_ASSERT(ne3 == ne13);
+
+ // we don't support permuted src0 or src1
+ GGML_ASSERT(nb00 == sizeof(float));
+
+ // dst cannot be transposed or permuted
+ GGML_ASSERT(nb0 == sizeof(float));
+ // GGML_ASSERT(nb0 <= nb1);
+ // GGML_ASSERT(nb1 <= nb2);
+ // GGML_ASSERT(nb2 <= nb3);
+
+ GGML_ASSERT(ne0 == ne00);
+ GGML_ASSERT(ne1 == ne10);
+ GGML_ASSERT(ne2 == ne02);
+ GGML_ASSERT(ne3 == ne03);
+
+ // nb01 >= nb00 - src0 is not transposed
+ // compute by src0 rows
+
+ // TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
+ // TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)
+
+ if (params->type == GGML_TASK_INIT) {
+ ggml_vec_set_f32(ne0*ne1*ne2*ne3, dst->data, 0);
+ return;
+ }
+
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // dst[:,:,:,:] = 0
+ // for i2,i3:
+ // for i1:
+ // for i01:
+ // for i0:
+ // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
+
+ // parallelize by last three dimensions
+
+ // total rows in dst
+ const int64_t nr = ne1*ne2*ne3;
+
+ // rows per thread
+ const int64_t dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int64_t ir0 = dr*ith;
+ const int64_t ir1 = MIN(ir0 + dr, nr);
+
+ // block-tiling attempt
+ const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32);
+ const int64_t blck_1 = 16;
+
+ for (int64_t bir = ir0; bir < ir1; bir += blck_1) {
+ const int64_t bir1 = MIN(bir + blck_1, ir1);
+ for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) {
+ const int64_t bne01 = MIN(bi01 + blck_0, ne01);
+ for (int64_t ir = bir; ir < bir1; ++ir) {
+ // dst indices
+ const int64_t i3 = ir/(ne2*ne1);
+ const int64_t i2 = (ir - i3*ne2*ne1)/ne1;
+ const int64_t i1 = (ir - i3*ne2*ne1 - i2*ne1);
+
+ const int64_t i02 = i2;
+ const int64_t i03 = i3;
+
+ //const int64_t i10 = i1;
+ const int64_t i12 = i2;
+ const int64_t i13 = i3;
+
+#if GGML_VEC_MAD_UNROLL > 2
+ const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL);
+ for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) {
+ const int64_t i11 = i01;
+
+ float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
+ float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
+ float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
+
+ ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1);
+ }
+ for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) {
+ const int64_t i11 = i01;
+
+ float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
+ float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
+ float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
+
+ ggml_vec_mad_f32(ne0, d, s0, *s1);
+ }
+#else
+ for (int64_t i01 = bi01; i01 < bne01; ++i01) {
+ const int64_t i11 = i01;
+
+ float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03));
+ float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
+ float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
+
+ ggml_vec_mad_f32(ne0, d, s0, *s1);
+ }
+#endif
}
}
}
-}
-// ggml_compute_forward_out_prod
+ //int64_t t1 = ggml_perf_time_us();
+ //static int64_t acc = 0;
+ //acc += t1 - t0;
+ //if (t1 - t0 > 10) {
+ // printf("\n");
+ // printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
+ // printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
+ // printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
+ // printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
+ // printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
+ //}
+}
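// Illustrative reference, not from the patch: the 2-D core of the kernel
// above. Per (i2,i3) slice, GGML_OP_OUT_PROD accumulates
// dst[i0,i1] += src0[i0,i01] * src1[i1,i01], a sum of rank-1 updates
// (dst = src0 * src1^T in ggml's i0-fastest layout):

static void out_prod_2d_sketch(
        int64_t ne0, int64_t ne1, int64_t ne01,
        const float * src0, // ne0 x ne01, element (i0,i01) at i01*ne0 + i0
        const float * src1, // ne1 x ne01, element (i1,i01) at i01*ne1 + i1
        float * dst) {      // ne0 x ne1,  element (i0,i1)  at i1*ne0  + i0
    for (int64_t i = 0; i < ne0*ne1; ++i) {
        dst[i] = 0.0f; // mirrors the zeroing done during GGML_TASK_INIT
    }
    for (int64_t i01 = 0; i01 < ne01; ++i01) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                // one ggml_vec_mad_f32 call per (i1,i01) pair in the real kernel
                dst[i1*ne0 + i0] += src0[i01*ne0 + i0] * src1[i01*ne1 + i1];
            }
        }
    }
}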
+
-static void ggml_compute_forward_out_prod_f32(
+static void ggml_compute_forward_out_prod_q_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
- int64_t t0 = ggml_perf_time_us();
- UNUSED(t0);
+ // int64_t t0 = ggml_perf_time_us();
+ // UNUSED(t0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
const int ith = params->ith;
const int nth = params->nth;
+ const enum ggml_type type = src0->type;
+ ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
+
GGML_ASSERT(ne02 == ne12);
GGML_ASSERT(ne03 == ne13);
GGML_ASSERT(ne2 == ne12);
GGML_ASSERT(ne3 == ne13);
- // we don't support permuted src0 or src1
- GGML_ASSERT(nb00 == sizeof(float));
+ // we don't support permuted src0 dim0
+ GGML_ASSERT(nb00 == ggml_type_size(type));
- // dst cannot be transposed or permuted
+ // dst dim0 cannot be transposed or permuted
GGML_ASSERT(nb0 == sizeof(float));
// GGML_ASSERT(nb0 <= nb1);
// GGML_ASSERT(nb1 <= nb2);
// for i0:
// dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3]
+ float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
+
for (int64_t ir = ir0; ir < ir1; ++ir) {
// dst indices
const int64_t i3 = ir/(ne2*ne1);
float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13));
float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3));
- ggml_vec_mad_f32(ne0, d, s0, *s1);
- // for (int64_t i0 = 0; i0 < ne0; ++i0) {
- // d[i0] += s0[i0] * s1[i1];
- // }
+ dequantize_row_q(s0, wdata, ne0);
+ ggml_vec_mad_f32(ne0, d, wdata, *s1);
}
}
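// Illustrative sketch, not from the patch: the inner step of the quantized
// variant above. Accumulation cannot happen in the quantized domain, so each
// quantized src0 row is expanded once into the per-thread f32 scratch and the
// mad is done in f32. ggml_to_float_t is assumed to have its usual
// (const void *, float *, int) signature at this revision:

static void out_prod_q_row_sketch(
        ggml_to_float_t dequantize_row_q,
        const void * s0_q,  // one quantized row of src0
        float s1_scalar,    // src1[i1,i01]
        float * wdata,      // per-thread f32 scratch, ne0 floats
        float * d,          // dst row, accumulated in f32
        int ne0) {
    dequantize_row_q(s0_q, wdata, ne0); // expand the quantized row
    for (int i0 = 0; i0 < ne0; ++i0) {
        d[i0] += wdata[i0] * s1_scalar; // same update as ggml_vec_mad_f32
    }
}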
case GGML_TYPE_Q5_0:
case GGML_TYPE_Q5_1:
case GGML_TYPE_Q8_0:
- case GGML_TYPE_Q8_1:
+ case GGML_TYPE_Q2_K:
+ case GGML_TYPE_Q3_K:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_Q6_K:
{
- GGML_ASSERT(false); // todo
- // ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
+ ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
} break;
case GGML_TYPE_F16:
{
const size_t nb1 = dst->nb[1];
-
for (int i1 = ir0; i1 < ir1; i1++) {
if (dst->data != src0->data) {
// src0 is same shape as dst => same indices
const int nr = ggml_nrows(src1);
const int nc = src1->ne[0];
- GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
- GGML_TENSOR_LOCALS(size_t, nb1, src1, nb);
+ GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne)
+ GGML_TENSOR_LOCALS(size_t, nb1, src1, nb)
// src0 and dst as viewed during set
const size_t nb0 = ggml_element_size(src0);
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
- const struct ggml_tensor * opt0,
struct ggml_tensor * dst) {
GGML_ASSERT(params->ith == 0);
- GGML_ASSERT(ggml_are_same_shape(opt0, dst));
- GGML_ASSERT(ggml_is_contiguous(opt0));
GGML_ASSERT(ggml_is_contiguous(dst));
- ggml_compute_forward_dup_same_cont(params, opt0, dst);
+ // ggml_compute_forward_dup_same_cont(params, opt0, dst);
+
+ if (params->type == GGML_TASK_INIT) {
+ memset(dst->data, 0, ggml_nbytes(dst));
+ }
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
- const struct ggml_tensor * opt0,
struct ggml_tensor * dst) {
GGML_ASSERT(params->ith == 0);
- GGML_ASSERT(ggml_are_same_shape(opt0, dst));
- GGML_ASSERT(ggml_is_contiguous(opt0));
GGML_ASSERT(ggml_is_contiguous(dst));
// ggml_compute_forward_dup_same_cont(params, opt0, dst);
}
}
-
static void ggml_compute_forward_get_rows_back(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
- const struct ggml_tensor * opt0,
struct ggml_tensor * dst) {
switch (src0->type) {
case GGML_TYPE_F16:
{
- ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, opt0, dst);
+ ggml_compute_forward_get_rows_back_f32_f16(params, src0, src1, dst);
} break;
case GGML_TYPE_F32:
{
- ggml_compute_forward_get_rows_back_f32(params, src0, src1, opt0, dst);
+ ggml_compute_forward_get_rows_back_f32(params, src0, src1, dst);
} break;
default:
{
// TODO: handle transposed/permuted matrices
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
GGML_ASSERT(ne00 == ne0);
GGML_ASSERT(ne00 == ne1);
// const float val = (sp[i] == -INFINITY) ? 0.0 : exp(sp[i] - max);
ggml_fp16_t s = GGML_FP32_TO_FP16(sp[i] - max);
memcpy(&scvt, &s, sizeof(scvt));
- const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
+ const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
sum += (ggml_float)val;
dp[i] = val;
}
return;
}
- const int n_past = ((int32_t *) dst->op_params)[0];
+ //const int n_past = ((int32_t *) dst->op_params)[0];
const int n_head = ((int32_t *) dst->op_params)[1];
float max_bias;
memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
- assert(n_past >= 0);
-
- const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
- const int ne1 = src0->ne[1]; // seq_len_without_past
- const int ne2 = src0->ne[2]; // n_head -> this is k
- //const int ne3 = src0->ne[3]; // 1 -> bsz
+ const int64_t ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
+ const int64_t ne1 = src0->ne[1]; // seq_len_without_past
+ const int64_t ne2 = src0->ne[2]; // n_head -> this is k
+ //const int64_t ne3 = src0->ne[3]; // 1 -> bsz
- const int n = ggml_nrows(src0);
- const int ne2_ne3 = n/ne1; // ne2*ne3
+ const int64_t n = ggml_nrows(src0);
+ const int64_t ne2_ne3 = n/ne1; // ne2*ne3
- const int nb0 = src0->nb[0];
- const int nb1 = src0->nb[1];
- const int nb2 = src0->nb[2];
+ const size_t nb0 = src0->nb[0];
+ const size_t nb1 = src0->nb[1];
+ const size_t nb2 = src0->nb[2];
//const int nb3 = src0->nb[3];
GGML_ASSERT(nb0 == sizeof(float));
- GGML_ASSERT(ne1 + n_past == ne0);
GGML_ASSERT(n_head == ne2);
// add alibi to src0 (KQ_scaled)
const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
- for (int i = 0; i < ne0; i++) {
- for (int j = 0; j < ne1; j++) {
- for (int k = 0; k < ne2_ne3; k++) {
+ for (int64_t i = 0; i < ne0; i++) {
+ for (int64_t j = 0; j < ne1; j++) {
+ for (int64_t k = 0; k < ne2_ne3; k++) {
float * const src = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
}
pdst[0] = i * m_k + src[0];
-
}
}
}
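// Worked example, not from the patch, assuming the standard ggml head-slope
// rule m_k = m0^(k+1) for k < n_heads_log2_floor, with odd powers of m1 past
// it: for max_bias = 8.0f and n_head = 8, n_heads_log2_floor = 8 and
// m0 = 2^(-8/8) = 0.5, so the heads get slopes 0.5, 0.25, ..., 1/256.

static float alibi_slope_sketch(int k, int n_heads_log2_floor, float m0, float m1) {
    return k < n_heads_log2_floor
        ? powf(m0, k + 1)                           // heads within the power-of-two range
        : powf(m1, 2*(k - n_heads_log2_floor) + 1); // remaining heads, odd exponents
}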
return;
}
- const int n_past = ((int32_t *) dst->op_params)[0];
+ //const int n_past = ((int32_t *) dst->op_params)[0];
const int n_head = ((int32_t *) dst->op_params)[1];
float max_bias;
memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
- assert(n_past >= 0);
-
const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
const int ne1 = src0->ne[1]; // seq_len_without_past
const int ne2 = src0->ne[2]; // n_head -> this is k
//const int nb3 = src0->nb[3];
GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
- GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
+ //GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
GGML_ASSERT(n_head == ne2);
// add alibi to src0 (KQ_scaled)
// ggml_compute_forward_rope
+static float rope_yarn_ramp(const float low, const float high, const int i0) {
+ const float y = (i0 / 2 - low) / MAX(0.001f, high - low);
+ return 1 - MIN(1, MAX(0, y));
+}
+
+// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
+// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
+static void rope_yarn(
+ float theta_extrap, float freq_scale, float corr_dims[2], int64_t i0, float ext_factor, float mscale,
+ float * cos_theta, float * sin_theta
+) {
+ // Get n-d rotational scaling corrected for extrapolation
+ float theta_interp = freq_scale * theta_extrap;
+ float theta = theta_interp;
+ if (ext_factor != 0.0f) {
+ float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor;
+ theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
+
+ // Get n-d magnitude scaling corrected for interpolation
+ mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale);
+ }
+ *cos_theta = cosf(theta) * mscale;
+ *sin_theta = sinf(theta) * mscale;
+}
+
+// Solving `n_rot = max_pos_emb / (2pi * base^((2 * x) / n_dims))` for x, we get
+// `corr_dim(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
+static float ggml_rope_yarn_corr_dim(int n_dims, int n_orig_ctx, float n_rot, float base) {
+ return n_dims * logf(n_orig_ctx / (n_rot * 2 * (float)M_PI)) / (2 * logf(base));
+}
+
+void ggml_rope_yarn_corr_dims(
+ int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]
+) {
+ // start and end correction dims
+ dims[0] = MAX(0, floorf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_fast, freq_base)));
+ dims[1] = MIN(n_dims - 1, ceilf(ggml_rope_yarn_corr_dim(n_dims, n_orig_ctx, beta_slow, freq_base)));
+}
+
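// Worked example, not from the patch, with hypothetical but typical values
// n_dims = 128, n_orig_ctx = 4096, freq_base = 10000, beta_fast = 32,
// beta_slow = 1:
//   corr_dim(32) = 128 * ln(4096 / (32 * 2*pi)) / (2 * ln(10000)) ~= 20.9
//   corr_dim(1)  = 128 * ln(4096 / ( 1 * 2*pi)) / (2 * ln(10000)) ~= 45.0
// so corr_dims ~= {20, 46}: dimensions below the first index rotate fast
// enough to be pure extrapolation, those past the second are pure
// interpolation, and rope_yarn_ramp() blends in between.

static void corr_dims_example(void) {
    float dims[2];
    ggml_rope_yarn_corr_dims(128, 4096, 10000.0f, 32.0f, 1.0f, dims);
    // dims[0] == 20.0f, dims[1] == 46.0f for these inputs
}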
static void ggml_compute_forward_rope_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
-
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
}
- float freq_base;
- float freq_scale;
+ float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
// these two only relevant for xPos RoPE:
float xpos_base;
- bool xpos_down;
+ bool xpos_down;
- const int n_past = ((int32_t *) dst->op_params)[0];
- const int n_dims = ((int32_t *) dst->op_params)[1];
- const int mode = ((int32_t *) dst->op_params)[2];
- const int n_ctx = ((int32_t *) dst->op_params)[3];
- memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float));
- memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
- memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float));
- memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool));
+ //const int n_past = ((int32_t *) dst->op_params)[0];
+ const int n_dims = ((int32_t *) dst->op_params)[1];
+ const int mode = ((int32_t *) dst->op_params)[2];
+ const int n_ctx = ((int32_t *) dst->op_params)[3];
+ const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
- assert(n_past >= 0);
+ memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
+ memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
+ memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
+ memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
+ memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
+ memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
+ memcpy(&xpos_base, (int32_t *) dst->op_params + 11, sizeof(float));
+ memcpy(&xpos_down, (int32_t *) dst->op_params + 12, sizeof(bool));
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
//printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
//printf("n_past = %d, ne2 = %d\n", n_past, ne2);
int ir = 0;
const float theta_scale = powf(freq_base, -2.0f/n_dims);
+ const float inv_ndims = -1.f/n_dims;
+ float corr_dims[2];
+ ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
const bool is_neox = mode & 2;
const bool is_glm = mode & 4;
+ const int32_t * pos = (const int32_t *) src1->data;
+
for (int64_t i3 = 0; i3 < ne3; i3++) {
- for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
- const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
+ for (int64_t i2 = 0; i2 < ne2; i2++) {
+ const int64_t p = pos[i2];
for (int64_t i1 = 0; i1 < ne1; i1++) {
if (ir++ < ir0) continue;
if (ir > ir1) break;
- float theta = freq_scale * (float)p;
+ float theta_base = (float)p;
if (is_glm) {
- theta = MIN(p, n_ctx - 2);
+ theta_base = MIN(p, n_ctx - 2);
float block_theta = MAX(p - (n_ctx - 2), 0);
for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ const float cos_theta = cosf(theta_base);
+ const float sin_theta = sinf(theta_base);
const float cos_block_theta = cosf(block_theta);
const float sin_block_theta = sinf(block_theta);
- theta *= theta_scale;
+ theta_base *= theta_scale;
block_theta *= theta_scale;
const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
}
} else if (!is_neox) {
for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ float cos_theta, sin_theta;
+ rope_yarn(
+ theta_base, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta
+ );
+
// zeta scaling for xPos only:
- float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), (n_past + i2) / xpos_base) : 1.0f;
+ float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
if (xpos_down) zeta = 1.0f / zeta;
- theta *= theta_scale;
+ theta_base *= theta_scale;
const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
} else {
// TODO: this might be wrong for ne0 != n_dims - need double check
// ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
+ theta_base *= freq_scale;
for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
for (int64_t ic = 0; ic < n_dims; ic += 2) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ // simplified from `(ib * n_dims + ic) * inv_ndims`
+ float cur_rot = inv_ndims * ic - ib;
+
+ float cos_theta, sin_theta;
+ rope_yarn(
+ theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
+ &cos_theta, &sin_theta
+ );
- theta *= theta_scale;
+ theta_base *= theta_scale;
const int64_t i0 = ib*n_dims + ic/2;
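// Illustrative sketch, not from the patch: once rope_yarn() has produced
// cos_theta/sin_theta, every branch applies the same 2-D rotation to one pair
// of values: (x[0], x[1]) in the non-neox layout, or (x[0], x[n_dims/2]) in
// the neox layout (ignoring the xPos zeta scaling shown above):

static void rope_pair_sketch(float cos_theta, float sin_theta,
                             float x0, float x1, float out[2]) {
    out[0] = x0*cos_theta - x1*sin_theta;
    out[1] = x0*sin_theta + x1*cos_theta;
}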
static void ggml_compute_forward_rope_f16(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
-
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
return;
}
- float freq_base;
- float freq_scale;
-
- const int n_past = ((int32_t *) dst->op_params)[0];
- const int n_dims = ((int32_t *) dst->op_params)[1];
- const int mode = ((int32_t *) dst->op_params)[2];
- const int n_ctx = ((int32_t *) dst->op_params)[3];
- memcpy(&freq_base, (int32_t *) dst->op_params + 4, sizeof(float));
- memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));
+ float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
- assert(n_past >= 0);
+ //const int n_past = ((int32_t *) dst->op_params)[0];
+ const int n_dims = ((int32_t *) dst->op_params)[1];
+ const int mode = ((int32_t *) dst->op_params)[2];
+ const int n_ctx = ((int32_t *) dst->op_params)[3];
+ const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
+ memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
+ memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
+ memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
+ memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
+ memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
+ memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
//printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
//printf("n_past = %d, ne2 = %d\n", n_past, ne2);
int ir = 0;
const float theta_scale = powf(freq_base, -2.0f/n_dims);
+ const float inv_ndims = -1.f/n_dims;
+ float corr_dims[2];
+ ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
const bool is_neox = mode & 2;
const bool is_glm = mode & 4;
+ const int32_t * pos = (const int32_t *) src1->data;
+
for (int64_t i3 = 0; i3 < ne3; i3++) {
- for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
- const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
+ for (int64_t i2 = 0; i2 < ne2; i2++) {
+ const int64_t p = pos[i2];
for (int64_t i1 = 0; i1 < ne1; i1++) {
if (ir++ < ir0) continue;
if (ir > ir1) break;
- float theta = freq_scale * (float)p;
+ float theta_base = (float)p;
if (is_glm) {
- theta = MIN(p, n_ctx - 2);
+ theta_base = MIN(p, n_ctx - 2);
float block_theta = MAX(p - (n_ctx - 2), 0);
for (int64_t i0 = 0; i0 < ne0 / 4; i0++) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ const float cos_theta = cosf(theta_base);
+ const float sin_theta = sinf(theta_base);
const float cos_block_theta = cosf(block_theta);
const float sin_block_theta = sinf(block_theta);
- theta *= theta_scale;
+ theta_base *= theta_scale;
block_theta *= theta_scale;
const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
dst_data[n_dims] = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
}
- } if (!is_neox) {
+ } else if (!is_neox) {
for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ float cos_theta, sin_theta;
+ rope_yarn(
+ theta_base, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta
+ );
- theta *= theta_scale;
+ theta_base *= theta_scale;
const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
} else {
// TODO: this might be wrong for ne0 != n_dims - need double check
// ref: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py#LL251C1-L294C28
+ theta_base *= freq_scale;
for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
for (int64_t ic = 0; ic < n_dims; ic += 2) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ // simplified from `(ib * n_dims + ic) * inv_ndims`
+ float cur_rot = inv_ndims * ic - ib;
- theta *= theta_scale;
+ float cos_theta, sin_theta;
+ rope_yarn(
+ theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
+ &cos_theta, &sin_theta
+ );
+
+ theta_base *= theta_scale;
const int64_t i0 = ib*n_dims + ic/2;
static void ggml_compute_forward_rope(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
switch (src0->type) {
case GGML_TYPE_F16:
{
- ggml_compute_forward_rope_f16(params, src0, dst);
+ ggml_compute_forward_rope_f16(params, src0, src1, dst);
} break;
case GGML_TYPE_F32:
{
- ggml_compute_forward_rope_f32(params, src0, dst);
+ ggml_compute_forward_rope_f32(params, src0, src1, dst);
} break;
default:
{
static void ggml_compute_forward_rope_back_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
float xpos_base;
bool xpos_down;
- const int n_past = ((int32_t *) dst->op_params)[0];
+ //const int n_past = ((int32_t *) dst->op_params)[0];
const int n_dims = ((int32_t *) dst->op_params)[1];
const int mode = ((int32_t *) dst->op_params)[2];
const int n_ctx = ((int32_t *) dst->op_params)[3]; UNUSED(n_ctx);
memcpy(&xpos_base, (int32_t *) dst->op_params + 6, sizeof(float));
memcpy(&xpos_down, (int32_t *) dst->op_params + 7, sizeof(bool));
- assert(n_past >= 0);
-
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
//printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
//printf("n_past = %d, ne2 = %d\n", n_past, ne2);
const bool is_neox = mode & 2;
+ const int32_t * pos = (const int32_t *) src1->data;
+
for (int64_t i3 = 0; i3 < ne3; i3++) {
- for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
- const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
+ for (int64_t i2 = 0; i2 < ne2; i2++) {
+ const int64_t p = pos[i2];
for (int64_t i1 = 0; i1 < ne1; i1++) {
if (ir++ < ir0) continue;
if (ir > ir1) break;
- float theta = freq_scale * (float)p;
+ float theta_base = freq_scale * (float)p;
if (!is_neox) {
for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ const float cos_theta = cosf(theta_base);
+ const float sin_theta = sinf(theta_base);
+
// zeta scaling for xPos only:
- float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), (n_past + i2) / xpos_base) : 1.0f;
+ float zeta = xpos_base != 0.0f ? powf((i0 + 0.4f * ne0) / (1.4f * ne0), p / xpos_base) : 1.0f;
if (xpos_down) zeta = 1.0f / zeta;
- theta *= theta_scale;
+ theta_base *= theta_scale;
const float * const dy = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
float * dx = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
} else {
for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
for (int64_t ic = 0; ic < n_dims; ic += 2) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ const float cos_theta = cosf(theta_base);
+ const float sin_theta = sinf(theta_base);
- theta *= theta_scale;
+ theta_base *= theta_scale;
const int64_t i0 = ib*n_dims + ic/2;
static void ggml_compute_forward_rope_back_f16(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
// dx = rope_back(dy, src1)
// src0 is dy, src1 contains options
- const int n_past = ((int32_t *) dst->op_params)[0];
+ //const int n_past = ((int32_t *) dst->op_params)[0];
const int n_dims = ((int32_t *) dst->op_params)[1];
const int mode = ((int32_t *) dst->op_params)[2];
- assert(n_past >= 0);
-
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
//printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
//printf("n_past = %d, ne2 = %d\n", n_past, ne2);
const bool is_neox = mode & 2;
+ const int32_t * pos = (const int32_t *) src1->data;
+
for (int64_t i3 = 0; i3 < ne3; i3++) {
- for (int64_t i2 = ((mode & 1) == 0 ? 0 : n_past); i2 < ne2; i2++) {
- const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);
+ for (int64_t i2 = 0; i2 < ne2; i2++) {
+ const int64_t p = pos[i2];
for (int64_t i1 = 0; i1 < ne1; i1++) {
if (ir++ < ir0) continue;
if (ir > ir1) break;
- float theta = (float)p;
+ float theta_base = (float)p;
if (!is_neox) {
for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ const float cos_theta = cosf(theta_base);
+ const float sin_theta = sinf(theta_base);
- theta *= theta_scale;
+ theta_base *= theta_scale;
const ggml_fp16_t * const dy = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
ggml_fp16_t * dx = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
} else {
for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
for (int64_t ic = 0; ic < n_dims; ic += 2) {
- const float cos_theta = cosf(theta);
- const float sin_theta = sinf(theta);
+ const float cos_theta = cosf(theta_base);
+ const float sin_theta = sinf(theta_base);
- theta *= theta_scale;
+ theta_base *= theta_scale;
const int64_t i0 = ib*n_dims + ic/2;
static void ggml_compute_forward_rope_back(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
switch (src0->type) {
case GGML_TYPE_F16:
{
- ggml_compute_forward_rope_back_f16(params, src0, dst);
+ ggml_compute_forward_rope_back_f16(params, src0, src1, dst);
} break;
case GGML_TYPE_F32:
{
- ggml_compute_forward_rope_back_f32(params, src0, dst);
+ ggml_compute_forward_rope_back_f32(params, src0, src1, dst);
} break;
default:
{
// ggml_compute_forward_conv_1d
-static void ggml_compute_forward_conv_1d_s1_ph_f16_f32(
+static void ggml_compute_forward_conv_1d_f16_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
const int ith = params->ith;
const int nth = params->nth;
const int nk = ne00;
- const int nh = nk/2;
- const int ew0 = ggml_up32(ne01);
+ // size of the convolution row - the kernel size unrolled across all input channels
+ const int ew0 = nk*ne01;
+
+ const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+ const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
+ const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
- GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
GGML_ASSERT(nb10 == sizeof(float));
if (params->type == GGML_TASK_INIT) {
- // TODO: fix this memset (wsize is overestimated)
memset(params->wdata, 0, params->wsize);
- // prepare kernel data (src0)
- {
- ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- for (int64_t i01 = 0; i01 < ne01; i01++) {
- const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
- ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
- for (int64_t i00 = 0; i00 < ne00; i00++) {
- dst_data[i00*ew0 + i01] = src[i00];
- }
- }
- }
- }
+ for (int64_t i11 = 0; i11 < ne11; i11++) {
+ const float * const src = (float *)((char *) src1->data + i11*nb11);
+ ggml_fp16_t * dst_data = wdata;
- // prepare source data (src1)
- {
- ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;
+ for (int64_t i0 = 0; i0 < ne0; i0++) {
+ for (int64_t ik = 0; ik < nk; ik++) {
+ const int idx0 = i0*s0 + ik*d0 - p0;
- for (int64_t i11 = 0; i11 < ne11; i11++) {
- const float * const src = (float *)((char *) src1->data + i11*nb11);
- ggml_fp16_t * dst_data = wdata;
- for (int64_t i10 = 0; i10 < ne10; i10++) {
- dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
+ if(!(idx0 < 0 || idx0 >= ne10)) {
+ dst_data[i0*ew0 + i11*nk + ik] = GGML_FP32_TO_FP16(src[idx0]);
+ }
}
}
}
}
// total rows in dst
- const int nr = ne02;
+ const int nr = ne2;
// rows per thread
const int dr = (nr + nth - 1)/nth;
const int ir0 = dr*ith;
const int ir1 = MIN(ir0 + dr, nr);
- for (int i1 = ir0; i1 < ir1; i1++) {
- float * dst_data = (float *)((char *) dst->data + i1*nb1);
- for (int64_t i0 = 0; i0 < ne10; ++i0) {
- dst_data[i0] = 0;
- for (int k = -nh; k <= nh; k++) {
- float v = 0.0f;
- ggml_vec_dot_f16(ew0, &v,
- (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
- (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
-
- dst_data[i0] += v;
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
+
+ for (int i2 = 0; i2 < ne2; i2++) {
+ for (int i1 = ir0; i1 < ir1; i1++) {
+ float * dst_data = (float *)((char *) dst->data + i2*nb2 + i1*nb1);
+
+ for (int i0 = 0; i0 < ne0; i0++) {
+ ggml_vec_dot_f16(ew0, dst_data + i0,
+ (ggml_fp16_t *) ((char *) src0->data + i1*nb02),
+ (ggml_fp16_t *) wdata + i2*nb2 + i0*ew0);
}
}
}
}
-static void ggml_compute_forward_conv_1d_s1_ph_f32(
+static void ggml_compute_forward_conv_1d_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
const int ith = params->ith;
const int nth = params->nth;
const int nk = ne00;
- const int nh = nk/2;
- const int ew0 = ggml_up32(ne01);
+ const int ew0 = nk*ne01;
+
+ const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+ const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
+ const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
- GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
GGML_ASSERT(nb00 == sizeof(float));
GGML_ASSERT(nb10 == sizeof(float));
if (params->type == GGML_TASK_INIT) {
- // TODO: fix this memset (wsize is overestimated)
memset(params->wdata, 0, params->wsize);
- // prepare kernel data (src0)
- {
- float * const wdata = (float *) params->wdata + 0;
+ float * const wdata = (float *) params->wdata + 0;
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- for (int64_t i01 = 0; i01 < ne01; i01++) {
- const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
- float * dst_data = wdata + i02*ew0*ne00;
- for (int64_t i00 = 0; i00 < ne00; i00++) {
- dst_data[i00*ew0 + i01] = src[i00];
+ for (int64_t i11 = 0; i11 < ne11; i11++) {
+ const float * const src = (float *)((char *) src1->data + i11*nb11);
+ float * dst_data = wdata;
+
+ for (int64_t i0 = 0; i0 < ne0; i0++) {
+ for (int64_t ik = 0; ik < nk; ik++) {
+ const int idx0 = i0*s0 + ik*d0 - p0;
+
+ if(!(idx0 < 0 || idx0 >= ne10)) {
+ dst_data[i0*ew0 + i11*nk + ik] = src[idx0];
}
}
}
}
- // prepare source data (src1)
- {
- float * const wdata = (float *) params->wdata + ne02*ew0*ne00;
+ return;
+ }
- for (int64_t i11 = 0; i11 < ne11; i11++) {
- const float * const src = (float *)((char *) src1->data + i11*nb11);
- float * dst_data = wdata;
- for (int64_t i10 = 0; i10 < ne10; i10++) {
- dst_data[(i10 + nh)*ew0 + i11] = src[i10];
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // total rows in dst
+ const int nr = ne02;
+
+ // rows per thread
+ const int dr = (nr + nth - 1)/nth;
+
+ // row range for this thread
+ const int ir0 = dr*ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ float * const wdata = (float *) params->wdata + 0;
+
+ for (int i2 = 0; i2 < ne2; i2++) {
+ for (int i1 = ir0; i1 < ir1; i1++) {
+ float * dst_data = (float *)((char *) dst->data + i2*nb2 + i1*nb1);
+
+ for (int i0 = 0; i0 < ne0; i0++) {
+ ggml_vec_dot_f32(ew0, dst_data + i0,
+ (float *) ((char *) src0->data + i1*nb02),
+ (float *) wdata + i2*nb2 + i0*ew0);
+ }
+ }
+ }
+}
+
+// TODO: reuse ggml_mul_mat or implement ggml_im2col and remove stage_0 and stage_1
+static void gemm_f16_out_f32(int64_t m, int64_t n, int64_t k,
+ ggml_fp16_t * A,
+ ggml_fp16_t * B,
+ float * C,
+ const int ith, const int nth) {
+ // does not seem to make a difference
+ int64_t m0, m1, n0, n1;
+ // patches per thread
+ if (m > n) {
+ n0 = 0;
+ n1 = n;
+
+ // total patches in dst
+ const int np = m;
+
+ // patches per thread
+ const int dp = (np + nth - 1)/nth;
+
+ // patch range for this thread
+ m0 = dp*ith;
+ m1 = MIN(m0 + dp, np);
+ } else {
+ m0 = 0;
+ m1 = m;
+
+ // total patches in dst
+ const int np = n;
+
+ // patches per thread
+ const int dp = (np + nth - 1)/nth;
+
+ // patch range for this thread
+ n0 = dp*ith;
+ n1 = MIN(n0 + dp, np);
+ }
+
+ // block-tiling attempt
+ int64_t blck_n = 16;
+ int64_t blck_m = 16;
+
+ // int64_t CACHE_SIZE = 2 * 1024 * 1024; // 2MB
+ // int64_t blck_size = CACHE_SIZE / (sizeof(float) + 2 * sizeof(ggml_fp16_t) * K);
+ // if (blck_size > 0) {
+ // blck_0 = 4;
+ // blck_1 = blck_size / blck_0;
+ // if (blck_1 < 0) {
+ // blck_1 = 1;
+ // }
+ // // blck_0 = (int64_t)sqrt(blck_size);
+ // // blck_1 = blck_0;
+ // }
+ // // printf("%zd %zd %zd %zd\n", blck_size, K, blck_0, blck_1);
+
+ for (int j = n0; j < n1; j+=blck_n) {
+ for (int i = m0; i < m1; i+=blck_m) {
+ // printf("i j k => %d %d %d\n", i, j, K);
+ for (int ii = i; ii < i + blck_m && ii < m1; ii++) {
+ for (int jj = j; jj < j + blck_n && jj < n1; jj++) {
+ ggml_vec_dot_f16(k,
+ C + ii*n + jj,
+ A + ii * k,
+ B + jj * k);
+ }
+ }
+ }
+ }
+}
+
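// Plain scalar reference for the blocked loops above (illustrative, not from
// the patch): A and B are both row-major with k as the contiguous dimension,
// so the routine computes C = A * B^T with C row-major [m, n]; the m > n test
// above only decides which dimension is split across threads.

static void gemm_abt_sketch(int64_t m, int64_t n, int64_t k,
        const float * A, const float * B, float * C) {
    for (int64_t i = 0; i < m; ++i) {
        for (int64_t j = 0; j < n; ++j) {
            float sum = 0.0f;
            for (int64_t l = 0; l < k; ++l) {
                sum += A[i*k + l] * B[j*k + l]; // row i of A dotted with row j of B
            }
            C[i*n + j] = sum;
        }
    }
}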
+// src0: kernel [OC, IC, K]
+// src1: signal [N, IC, IL]
+// dst: result [N, OL, IC*K]
+static void ggml_compute_forward_conv_1d_stage_0_f32(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F16);
+
+ int64_t t0 = ggml_perf_time_us();
+ UNUSED(t0);
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const int64_t N = ne12;
+ const int64_t IC = ne11;
+ const int64_t IL = ne10;
+
+ const int64_t K = ne00;
+
+ const int64_t OL = ne1;
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+ const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
+ const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
+
+ GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb10 == sizeof(float));
+
+ if (params->type == GGML_TASK_INIT) {
+ memset(dst->data, 0, ggml_nbytes(dst));
+ return;
+ }
+
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // im2col: [N, IC, IL] => [N, OL, IC*K]
+ {
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data;
+
+ for (int64_t in = 0; in < N; in++) {
+ for (int64_t iol = 0; iol < OL; iol++) {
+ for (int64_t iic = ith; iic < IC; iic+=nth) {
+
+ // micro kernel
+ ggml_fp16_t * dst_data = wdata + (in*OL + iol)*(IC*K); // [IC, K]
+ const float * const src_data = (float *)((char *) src1->data + in*nb12 + iic*nb11); // [IL]
+
+ for (int64_t ik = 0; ik < K; ik++) {
+ const int64_t iil = iol*s0 + ik*d0 - p0;
+
+ if (!(iil < 0 || iil >= IL)) {
+ dst_data[iic*K + ik] = GGML_FP32_TO_FP16(src_data[iil]);
+ }
+ }
}
}
}
+ }
+}
+
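// Shape arithmetic for the im2col above (illustrative, not from the patch):

static int64_t conv_out_len_sketch(int64_t il, int64_t k, int s, int p, int d) {
    // standard convolution output length; OL = ne1 of dst is expected to
    // satisfy this for the s0/p0/d0 stored in op_params
    return (il + 2*p - d*(k - 1) - 1)/s + 1;
}

// e.g. IL = 8, K = 3, s0 = 1, p0 = 1, d0 = 1 gives OL = 8, and row iol of the
// im2col buffer then holds src[iol-1], src[iol], src[iol+1] per input channel,
// with out-of-range taps left at the zero fill from GGML_TASK_INIT.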
+// gemm: [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K]
+// src0: [OC, IC, K]
+// src1: [N, OL, IC * K]
+// result: [N, OC, OL]
+static void ggml_compute_forward_conv_1d_stage_1_f16(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F16);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ int64_t t0 = ggml_perf_time_us();
+ UNUSED(t0);
+
+ if (params->type == GGML_TASK_INIT) {
return;
}
return;
}
- // total rows in dst
- const int nr = ne02;
+ GGML_TENSOR_BINARY_OP_LOCALS
- // rows per thread
- const int dr = (nr + nth - 1)/nth;
+ GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb10 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb0 == sizeof(float));
- // row range for this thread
- const int ir0 = dr*ith;
- const int ir1 = MIN(ir0 + dr, nr);
+ const int N = ne12;
+ const int OL = ne11;
- for (int i1 = ir0; i1 < ir1; i1++) {
- float * dst_data = (float *)((char *) dst->data + i1*nb1);
- for (int64_t i0 = 0; i0 < ne10; ++i0) {
- dst_data[i0] = 0;
- for (int k = -nh; k <= nh; k++) {
- float v = 0.0f;
- ggml_vec_dot_f32(ew0, &v,
- (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
- (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
-
- dst_data[i0] += v;
- }
- }
+ const int OC = ne02;
+ const int IC = ne01;
+ const int K = ne00;
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ int64_t m = OC;
+ int64_t n = OL;
+ int64_t k = IC * K;
+
+ // [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K]
+ for (int i = 0; i < N; i++) {
+ ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k]
+ ggml_fp16_t * B = (ggml_fp16_t *)src1->data + i * m * k; // [n, k]
+ float * C = (float *)dst->data + i * m * n; // [m, n]
+
+ gemm_f16_out_f32(m, n, k, A, B, C, ith, nth);
}
}
-static void ggml_compute_forward_conv_1d_s1_ph(
+static void ggml_compute_forward_conv_1d(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
- switch (src0->type) {
+ switch(src0->type) {
case GGML_TYPE_F16:
{
- ggml_compute_forward_conv_1d_s1_ph_f16_f32(params, src0, src1, dst);
+ ggml_compute_forward_conv_1d_f16_f32(params, src0, src1, dst);
} break;
case GGML_TYPE_F32:
{
- ggml_compute_forward_conv_1d_s1_ph_f32(params, src0, src1, dst);
+ ggml_compute_forward_conv_1d_f32(params, src0, src1, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
+static void ggml_compute_forward_conv_1d_stage_0(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ switch(src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_conv_1d_stage_0_f32(params, src0, src1, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
+static void ggml_compute_forward_conv_1d_stage_1(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ switch(src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_conv_1d_stage_1_f16(params, src0, src1, dst);
} break;
default:
{
}
}
-static void ggml_compute_forward_conv_1d_s2_ph_f16_f32(
+// ggml_compute_forward_conv_transpose_1d
+
+static void ggml_compute_forward_conv_transpose_1d_f16_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
const int ith = params->ith;
const int nth = params->nth;
- const int nk = ne00;
- const int nh = nk/2;
-
- const int ew0 = ggml_up32(ne01);
+ const int nk = ne00*ne01*ne02;
- GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
GGML_ASSERT(nb10 == sizeof(float));
if (params->type == GGML_TASK_INIT) {
- // TODO: fix this memset (wsize is overestimated)
memset(params->wdata, 0, params->wsize);
- // prepare kernel data (src0)
+ // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
{
ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
for (int64_t i02 = 0; i02 < ne02; i02++) {
for (int64_t i01 = 0; i01 < ne01; i01++) {
const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
- ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
+ ggml_fp16_t * dst_data = wdata + i01*ne00*ne02;
for (int64_t i00 = 0; i00 < ne00; i00++) {
- dst_data[i00*ew0 + i01] = src[i00];
+ dst_data[i00*ne02 + i02] = src[i00];
}
}
}
}
- // prepare source data (src1)
+ // permute source data (src1) from (L x Cin) to (Cin x L)
{
- ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
+ ggml_fp16_t * dst_data = wdata;
for (int64_t i11 = 0; i11 < ne11; i11++) {
const float * const src = (float *)((char *) src1->data + i11*nb11);
- ggml_fp16_t * dst_data = wdata;
for (int64_t i10 = 0; i10 < ne10; i10++) {
- dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
+ dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]);
}
}
}
+ // need to zero dst since we are accumulating into it
+ memset(dst->data, 0, ggml_nbytes(dst));
+
return;
}
return;
}
+ const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+
// total rows in dst
- const int nr = ne02;
+ const int nr = ne1;
// rows per thread
const int dr = (nr + nth - 1)/nth;
const int ir0 = dr*ith;
const int ir1 = MIN(ir0 + dr, nr);
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
+ ggml_fp16_t * const wdata_src = wdata + nk;
+
for (int i1 = ir0; i1 < ir1; i1++) {
float * dst_data = (float *)((char *) dst->data + i1*nb1);
- for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
- dst_data[i0/2] = 0;
- for (int k = -nh; k <= nh; k++) {
- float v = 0.0f;
- ggml_vec_dot_f16(ew0, &v,
- (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
- (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
-
- dst_data[i0/2] += v;
+ ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00;
+ for (int i10 = 0; i10 < ne10; i10++) {
+ const int i1n = i10*ne11;
+ for (int i00 = 0; i00 < ne00; i00++) {
+ float v = 0;
+ ggml_vec_dot_f16(ne02, &v,
+ (ggml_fp16_t *) wdata_src + i1n,
+ (ggml_fp16_t *) wdata_kernel + i00*ne02);
+ dst_data[i10*s0 + i00] += v;
}
}
}
}
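// Illustrative note, not from the patch: the loop above scatters input
// position i10 into dst positions i10*s0 + i00 for i00 in [0, K), which is
// why dst is zeroed during GGML_TASK_INIT and why the implied output length
// is (IL - 1)*s0 + K:

static int64_t conv_transpose_out_len_sketch(int64_t il, int64_t k, int s) {
    return (il - 1)*s + k; // e.g. IL = 4, K = 3, s0 = 2 gives OL = 9
}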
-static void ggml_compute_forward_conv_1d_s2_ph_f32(
+static void ggml_compute_forward_conv_transpose_1d_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
const int ith = params->ith;
const int nth = params->nth;
- const int nk = ne00;
- const int nh = nk/2;
-
- const int ew0 = ggml_up32(ne01);
+ const int nk = ne00*ne01*ne02;
- GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
GGML_ASSERT(nb00 == sizeof(float));
GGML_ASSERT(nb10 == sizeof(float));
if (params->type == GGML_TASK_INIT) {
- // TODO: fix this memset (wsize is overestimated)
memset(params->wdata, 0, params->wsize);
- // prepare kernel data (src0)
+ // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
{
float * const wdata = (float *) params->wdata + 0;
for (int64_t i02 = 0; i02 < ne02; i02++) {
for (int64_t i01 = 0; i01 < ne01; i01++) {
const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
- float * dst_data = wdata + i02*ew0*ne00;
+ float * dst_data = wdata + i01*ne00*ne02;
for (int64_t i00 = 0; i00 < ne00; i00++) {
- dst_data[i00*ew0 + i01] = src[i00];
+ dst_data[i00*ne02 + i02] = src[i00];
}
}
}
// prepare source data (src1)
{
- float * const wdata = (float *) params->wdata + ne02*ew0*ne00;
+ float * const wdata = (float *) params->wdata + nk;
+ float * dst_data = wdata;
for (int64_t i11 = 0; i11 < ne11; i11++) {
const float * const src = (float *)((char *) src1->data + i11*nb11);
- float * dst_data = wdata;
for (int64_t i10 = 0; i10 < ne10; i10++) {
- dst_data[(i10 + nh)*ew0 + i11] = src[i10];
+ dst_data[i10*ne11 + i11] = src[i10];
}
}
}
+ // need to zero dst since we are accumulating into it
+ memset(dst->data, 0, ggml_nbytes(dst));
+
return;
}
return;
}
+ const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+
// total rows in dst
- const int nr = ne02;
+ const int nr = ne1;
// rows per thread
const int dr = (nr + nth - 1)/nth;
const int ir0 = dr*ith;
const int ir1 = MIN(ir0 + dr, nr);
+ float * const wdata = (float *) params->wdata + 0;
+ float * const wdata_src = wdata + nk;
+
for (int i1 = ir0; i1 < ir1; i1++) {
float * dst_data = (float *)((char *) dst->data + i1*nb1);
- for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
- dst_data[i0/2] = 0;
- for (int k = -nh; k <= nh; k++) {
- float v = 0.0f;
- ggml_vec_dot_f32(ew0, &v,
- (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
- (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
-
- dst_data[i0/2] += v;
+ float * wdata_kernel = wdata + i1*ne02*ne00;
+ for (int i10 = 0; i10 < ne10; i10++) {
+ const int i1n = i10*ne11;
+ for (int i00 = 0; i00 < ne00; i00++) {
+ float v = 0;
+ ggml_vec_dot_f32(ne02, &v,
+ wdata_src + i1n,
+ wdata_kernel + i00*ne02);
+ dst_data[i10*s0 + i00] += v;
}
}
}
}
-static void ggml_compute_forward_conv_1d_s2_ph(
+static void ggml_compute_forward_conv_transpose_1d(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
switch (src0->type) {
case GGML_TYPE_F16:
{
- ggml_compute_forward_conv_1d_s2_ph_f16_f32(params, src0, src1, dst);
+ ggml_compute_forward_conv_transpose_1d_f16_f32(params, src0, src1, dst);
} break;
case GGML_TYPE_F32:
{
- ggml_compute_forward_conv_1d_s2_ph_f32(params, src0, src1, dst);
+ ggml_compute_forward_conv_transpose_1d_f32(params, src0, src1, dst);
} break;
default:
{
}
}
-// ggml_compute_forward_conv_1d
+// ggml_compute_forward_conv_2d
-static void ggml_compute_forward_conv_1d(
+// src0: kernel [OC, IC, KH, KW]
+// src1: image [N, IC, IH, IW]
+// dst: result [N, OH, OW, IC*KH*KW]
+static void ggml_compute_forward_conv_2d_stage_0_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F16);
+
+ int64_t t0 = ggml_perf_time_us();
+ UNUSED(t0);
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const int64_t N = ne13;
+ const int64_t IC = ne12;
+ const int64_t IH = ne11;
+ const int64_t IW = ne10;
+
+ // const int64_t OC = ne03;
+ // const int64_t IC = ne02;
+ const int64_t KH = ne01;
+ const int64_t KW = ne00;
+
+ const int64_t OH = ne2;
+ const int64_t OW = ne1;
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
- const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
- const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
- GGML_ASSERT(d0 == 1); // dilation not supported
- GGML_ASSERT(p0 == src0->ne[0]/2); // only half padding supported
- if (s0 == 1) {
- ggml_compute_forward_conv_1d_s1_ph(params, src0, src1, dst);
- } else if (s0 == 2) {
- ggml_compute_forward_conv_1d_s2_ph(params, src0, src1, dst);
- } else {
- GGML_ASSERT(false); // only stride 1 and 2 supported
- };
+ const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
+ const int32_t p0 = ((const int32_t*)(dst->op_params))[2];
+ const int32_t p1 = ((const int32_t*)(dst->op_params))[3];
+ const int32_t d0 = ((const int32_t*)(dst->op_params))[4];
+ const int32_t d1 = ((const int32_t*)(dst->op_params))[5];
+
+ GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb10 == sizeof(float));
+
+ if (params->type == GGML_TASK_INIT) {
+ memset(dst->data, 0, ggml_nbytes(dst));
+ return;
+ }
+
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW]
+ {
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data;
+
+ for (int64_t in = 0; in < N; in++) {
+ for (int64_t ioh = 0; ioh < OH; ioh++) {
+ for (int64_t iow = 0; iow < OW; iow++) {
+ for (int64_t iic = ith; iic < IC; iic+=nth) {
+
+ // micro kernel
+ ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
+ const float * const src_data = (float *)((char *) src1->data + in*nb13 + iic*nb12); // [IH, IW]
+
+ for (int64_t ikh = 0; ikh < KH; ikh++) {
+ for (int64_t ikw = 0; ikw < KW; ikw++) {
+ const int64_t iiw = iow*s0 + ikw*d0 - p0;
+ const int64_t iih = ioh*s1 + ikh*d1 - p1;
+
+ if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) {
+ dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
}
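+
+// illustrative index map for the im2col above (commented pseudo-code):
+//
+//   dst[n][oh][ow][ic*KH*KW + kh*KW + kw] =
+//       in bounds ? fp16(src1[n][ic][ih][iw]) : 0    // zeroed during INIT
+//   with iw = ow*s0 + kw*d0 - p0 and ih = oh*s1 + kh*d1 - p1
+//
+// the output extents follow the usual convolution arithmetic, e.g.
+//   OW = (IW + 2*p0 - d0*(KW - 1) - 1)/s0 + 1    (and OH analogously)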
-// ggml_compute_forward_conv_2d
+// gemm: [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW]
+// src0: [OC, IC, KH, KW]
+// src1: [N, OH, OW, IC * KH * KW]
+// result: [N, OC, OH, OW]
+static void ggml_compute_forward_conv_2d_stage_1_f16(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F16);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+ int64_t t0 = ggml_perf_time_us();
+ UNUSED(t0);
+
+ if (params->type == GGML_TASK_INIT) {
+ return;
+ }
+
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ GGML_TENSOR_BINARY_OP_LOCALS;
+
+ GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb10 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb0 == sizeof(float));
+
+ const int N = ne13;
+ const int OH = ne12;
+ const int OW = ne11;
+
+ const int OC = ne03;
+ const int IC = ne02;
+ const int KH = ne01;
+ const int KW = ne00;
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ int64_t m = OC;
+ int64_t n = OH * OW;
+ int64_t k = IC * KH * KW;
+
+ // [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW]
+ for (int i = 0; i < N; i++) {
+ ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k]
+ ggml_fp16_t * B = (ggml_fp16_t *)src1->data + i * m * k; // [n, k]
+ float * C = (float *)dst->data + i * m * n; // [m, n]
+
+ gemm_f16_out_f32(m, n, k, A, B, C, ith, nth);
+ }
+}
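+
+// shape sketch for the call above, assuming gemm_f16_out_f32 computes C = A * B^T
+// with the rows of C split across the nth threads (commented pseudo-code):
+//
+//   for im in [ith, m) step nth:    // one output channel per row of C
+//     for in in [0, n):             // one (oh, ow) position per column of C
+//       C[im*n + in] = sum over ik in [0, k) of fp32(A[im*k + ik]) * fp32(B[in*k + ik])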
static void ggml_compute_forward_conv_2d_f16_f32(
const struct ggml_compute_params * params,
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ // src1: image [N, IC, IH, IW]
+ // src0: kernel [OC, IC, KH, KW]
+ // dst: result [N, OC, OH, OW]
+ // ne12: IC
+ // ne0: OW
+ // ne1: OH
+ // nk0: KW
+ // nk1: KH
+ // ne13: N
+
+ const int N = ne13;
+ const int IC = ne12;
+ const int IH = ne11;
+ const int IW = ne10;
+
+ const int OC = ne03;
+ // const int IC = ne02;
+ const int KH = ne01;
+ const int KW = ne00;
+
+ const int OH = ne1;
+ const int OW = ne0;
const int ith = params->ith;
const int nth = params->nth;
- const int nk0 = ne00;
- const int nk1 = ne01;
+ // const int nk0 = ne00;
+ // const int nk1 = ne01;
// size of the convolution row - the kernel size unrolled across all channels
- const int ew0 = nk0*nk1*ne02;
+ // const int ew0 = nk0*nk1*ne02;
+ // ew0: IC*KH*KW
const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
memset(params->wdata, 0, params->wsize);
// prepare source data (src1)
+ // im2col: [N, IC, IH, IW] => [N*OH*OW, IC*KH*KW]
+
{
ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
- for (int i12 = 0; i12 < ne12; i12++) {
- const float * const src = (float *)((char *) src1->data + i12*nb12);
- ggml_fp16_t * dst_data = wdata;
+ for (int in = 0; in < N; in++) {
+ for (int iic = 0; iic < IC; iic++) {
+ for (int ioh = 0; ioh < OH; ioh++) {
+ for (int iow = 0; iow < OW; iow++) {
- for (int i1 = 0; i1 < ne1; i1++) {
- for (int i0 = 0; i0 < ne0; i0++) {
- for (int ik1 = 0; ik1 < nk1; ik1++) {
- for (int ik0 = 0; ik0 < nk0; ik0++) {
- const int idx0 = i0*s0 + ik0*d0 - p0;
- const int idx1 = i1*s1 + ik1*d1 - p1;
-
- if (!(idx1 < 0 || idx1 >= ne11 || idx0 < 0 || idx0 >= ne10)) {
- dst_data[(i1*ne0 + i0)*ew0 + i12*(nk0*nk1) + ik1*nk0 + ik0] =
- GGML_FP32_TO_FP16(src[idx1*ne10 + idx0]);
+ // micro kernel
+ ggml_fp16_t * dst_data = wdata + (in*OH*OW + ioh*OW + iow)*(IC*KH*KW); // [IC, KH, KW]
+ const float * const src_data = (float *)((char *) src1->data + in*nb13 + iic*nb12); // [IH, IW]
+
+ for (int ikh = 0; ikh < KH; ikh++) {
+ for (int ikw = 0; ikw < KW; ikw++) {
+ const int iiw = iow*s0 + ikw*d0 - p0;
+ const int iih = ioh*s1 + ikh*d1 - p1;
+
+ if (!(iih < 0 || iih >= IH || iiw < 0 || iiw >= IW)) {
+ dst_data[iic*(KH*KW) + ikh*KW + ikw] = GGML_FP32_TO_FP16(src_data[iih*IW + iiw]);
+ }
}
}
}
return;
}
- // total patches in dst
- const int np = ne2;
-
- // patches per thread
- const int dp = (np + nth - 1)/nth;
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
+ // wdata: [N*OH*OW, IC*KH*KW]
+ // dst: result [N, OC, OH, OW]
+ // src0: kernel [OC, IC, KH, KW]
- // patch range for this thread
- const int ip0 = dp*ith;
- const int ip1 = MIN(ip0 + dp, np);
+ int64_t m = OC;
+ int64_t n = OH * OW;
+ int64_t k = IC * KH * KW;
- ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
+ // [N, OC, OH, OW] = [OC, IC * KH * KW] x [N*OH*OW, IC * KH * KW]
+ for (int i = 0; i < N; i++) {
+ ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k]
+ ggml_fp16_t * B = (ggml_fp16_t *)wdata + i * m * k; // [n, k]
+ float * C = (float *)dst->data + i * m * n; // [m, n]
- for (int i3 = 0; i3 < ne3; i3++) {
- for (int i2 = ip0; i2 < ip1; i2++) {
- float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2);
-
- for (int i1 = 0; i1 < ne1; ++i1) {
- for (int i0 = 0; i0 < ne0; ++i0) {
- ggml_vec_dot_f16(ew0, dst_data + i1*ne0 + i0,
- (ggml_fp16_t *) ((char *) src0->data + i2*nb03),
- (ggml_fp16_t *) wdata + i3*nb3 + (i1*ne0 + i0)*ew0);
- }
- }
- }
+ gemm_f16_out_f32(m, n, k, A, B, C, ith, nth);
}
}
}
}
+static void ggml_compute_forward_conv_2d_stage_0(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_conv_2d_stage_0_f32(params, src0, src1, dst);
+ } break;
+ case GGML_TYPE_F32:
+ {
+ GGML_ASSERT(false);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
+static void ggml_compute_forward_conv_2d_stage_1(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ switch (src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_conv_2d_stage_1_f16(params, src0, src1, dst);
+ } break;
+ case GGML_TYPE_F32:
+ {
+ GGML_ASSERT(false);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
// ggml_compute_forward_conv_transpose_2d
static void ggml_compute_forward_conv_transpose_2d(
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
- GGML_TENSOR_BINARY_OP_LOCALS;
+ GGML_TENSOR_BINARY_OP_LOCALS
const int ith = params->ith;
const int nth = params->nth;
}
}
+ memset(dst->data, 0, ggml_nbytes(dst));
+
return;
}
ggml_compute_forward_pool_1d_sk_p0(params, op, src0, k0, dst);
}
-// ggml_compute_forward_pool_2d_sk_p0
+// ggml_compute_forward_pool_2d
-static void ggml_compute_forward_pool_2d_sk_p0(
+static void ggml_compute_forward_pool_2d(
const struct ggml_compute_params * params,
- const enum ggml_op_pool op,
const struct ggml_tensor * src,
- const int k0,
- const int k1,
struct ggml_tensor * dst) {
assert(src->type == GGML_TYPE_F32);
assert(params->ith == 0);
return;
}
+ const int32_t * opts = (const int32_t *)dst->op_params;
+ enum ggml_op_pool op = opts[0];
+ const int k0 = opts[1];
+ const int k1 = opts[2];
+ const int s0 = opts[3];
+ const int s1 = opts[4];
+ const int p0 = opts[5];
+ const int p1 = opts[6];
const char * cdata = (const char*)src->data;
const char * const data_end = cdata + ggml_nbytes(src);
float * dplane = (float *)dst->data;
const int ka = k0 * k1;
+ const int offset0 = -p0;
+ const int offset1 = -p1;
while (cdata < data_end) {
for (int oy = 0; oy < py; ++oy) {
case GGML_OP_POOL_COUNT: GGML_ASSERT(false); break;
}
- const int ix = ox * k0;
- const int iy = oy * k1;
+ const int ix = offset0 + ox * s0;
+ const int iy = offset1 + oy * s1;
for (int ky = 0; ky < k1; ++ky) {
+ if (iy + ky < 0 || iy + ky >= src->ne[1]) continue;
const float * const srow = (const float *)(cdata + src->nb[1] * (iy + ky));
for (int kx = 0; kx < k0; ++kx) {
int j = ix + kx;
+ if (j < 0 || j >= src->ne[0]) continue;
switch (op) {
case GGML_OP_POOL_AVG: *out += srow[j]; break;
case GGML_OP_POOL_MAX: if (srow[j] > *out) *out = srow[j]; break;
}
cdata += src->nb[2];
- dplane += pa;
- }
-}
-
-// ggml_compute_forward_pool_2d
-
-static void ggml_compute_forward_pool_2d(
- const struct ggml_compute_params * params,
- const struct ggml_tensor * src0,
- struct ggml_tensor * dst) {
-
- const int32_t * opts = (const int32_t *)dst->op_params;
- enum ggml_op_pool op = opts[0];
- const int k0 = opts[1];
- const int k1 = opts[2];
- const int s0 = opts[3];
- const int s1 = opts[4];
- const int p0 = opts[5];
- const int p1 = opts[6];
- GGML_ASSERT(p0 == 0);
- GGML_ASSERT(p1 == 0); // padding not supported
- GGML_ASSERT(k0 == s0);
- GGML_ASSERT(k1 == s1); // only s = k supported
-
- ggml_compute_forward_pool_2d_sk_p0(params, op, src0, k0, k1, dst);
+ dplane += pa;
+ }
}
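+
+// with padding, taps that fall outside the source are simply skipped: they never
+// update a MAX result, and for AVG they contribute zero while the divisor stays
+// ka = k0*k1, i.e. zero-padding semantics. e.g. along one dimension, k0 = s0 = 2
+// and p0 = 1 applied to the row [1, 3] yields the windows [pad, 1] and [3, pad],
+// so the AVG outputs are 0.5 and 1.5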
// ggml_compute_forward_upscale
const int ith = params->ith;
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
const int scale_factor = dst->op_params[0];
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
- GGML_TENSOR_LOCALS(int64_t, neq, q, ne);
- GGML_TENSOR_LOCALS(size_t, nbq, q, nb);
- GGML_TENSOR_LOCALS(int64_t, nek, k, ne);
- GGML_TENSOR_LOCALS(size_t, nbk, k, nb);
- GGML_TENSOR_LOCALS(int64_t, nev, v, ne);
- GGML_TENSOR_LOCALS(size_t, nbv, v, nb);
- GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
- GGML_TENSOR_LOCALS(size_t, nb, dst, nb);
+ GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
+ GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
+ GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
+ GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
+ GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
+ GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
+ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
+ GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
const int ith = params->ith;
const int nth = params->nth;
S[i] = -INFINITY;
}
- for (int64_t ic = 0; ic < nek1; ++ic) {
+ const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
+ for (int64_t ic = 0; ic < masked_begin; ++ic) {
// k indices
const int ik3 = iq3;
- const int ik2 = iq2;
+ const int ik2 = iq2 % nek2;
const int ik1 = ic;
// S indices
}
// scale
- ggml_vec_scale_f32(nek1, S, scale);
+ ggml_vec_scale_f32(masked_begin, S, scale);
- if (masked) {
- for (int64_t i = P; i < M; i++) {
- if (i > P + iq1) {
- S[i] = -INFINITY;
- }
- }
+ for (int64_t i = masked_begin; i < M; i++) {
+ S[i] = -INFINITY;
}
// softmax
+ // exclude known -INF S[..] values from max and loop
+ // don't forget to set their SW values to zero
{
float max = -INFINITY;
- ggml_vec_max_f32(M, &max, S);
+ ggml_vec_max_f32(masked_begin, &max, S);
ggml_float sum = 0.0;
{
ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
+ if (i >= masked_begin) {
+ break;
+ }
float * SS = S + i;
for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
- if (SS[j] == -INFINITY) {
+ if (i + j >= masked_begin) {
+ break;
+ } else if (SS[j] == -INFINITY) {
SS[j] = 0.0f;
} else {
#ifndef GGML_FLASH_ATTN_EXP_FP16
#else
ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
memcpy(&scvt[j], &s, sizeof(uint16_t));
- const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
+ const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
#endif
sump[j] += (ggml_float)val;
SS[j] = val;
assert(sum > 0.0);
sum = 1.0/sum;
- ggml_vec_scale_f32(M, S, sum);
+ ggml_vec_scale_f32(masked_begin, S, sum);
#ifndef NDEBUG
- for (int i = 0; i < M; ++i) {
+ for (int i = 0; i < masked_begin; ++i) {
assert(!isnan(S[i]));
assert(!isinf(S[i]));
}
const int i2 = iq2;
const int i3 = iq3;
- ggml_vec_dot_f32(nek1,
- (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
- (float *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)),
+ // v indices
+ const int iv2 = iq2 % nev2;
+ const int iv3 = iq3;
+
+ ggml_vec_dot_f32(masked_begin,
+ (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
+ (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
S);
}
}
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
- GGML_TENSOR_LOCALS(int64_t, neq, q, ne);
- GGML_TENSOR_LOCALS(size_t, nbq, q, nb);
- GGML_TENSOR_LOCALS(int64_t, nek, k, ne);
- GGML_TENSOR_LOCALS(size_t, nbk, k, nb);
- GGML_TENSOR_LOCALS(int64_t, nev, v, ne);
- GGML_TENSOR_LOCALS(size_t, nbv, v, nb);
- GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
- GGML_TENSOR_LOCALS(size_t, nb, dst, nb);
+ GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
+ GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
+ GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
+ GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
+ GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
+ GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
+ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
+ GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
const int ith = params->ith;
const int nth = params->nth;
for (int64_t ic = 0; ic < nek1; ++ic) {
// k indices
const int ik3 = iq3;
- const int ik2 = iq2;
+ const int ik2 = iq2 % nek2;
const int ik1 = ic;
// S indices
for (int64_t ic = 0; ic < nek1; ic += GGML_VEC_DOT_UNROLL) {
// k indices
const int ik3 = iq3;
- const int ik2 = iq2;
+ const int ik2 = iq2 % nek2;
const int ik1 = ic;
// S indices
}
// softmax
+ // todo: exclude known -INF S[..] values from max and loop, assuming their results to be zero.
+ // don't forget to set their S values to zero
{
float max = -INFINITY;
ggml_vec_max_f32(M, &max, S);
} else {
ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max);
memcpy(&scvt[j], &s, sizeof(uint16_t));
- const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
+ const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
sump[j] += (ggml_float)val;
SS[j] = val;
}
S16[i] = GGML_FP32_TO_FP16(S[i]);
}
+ // todo: exclude known zero S[..] values from the dot product (reducing nev0 and advancing the start of v and S16).
if (GGML_VEC_DOT_UNROLL == 1 || (nev1 % GGML_VEC_DOT_UNROLL != 0)) {
for (int64_t ic = 0; ic < nev1; ++ic) {
// dst indices
const int i2 = iq2;
const int i3 = iq3;
- ggml_vec_dot_f16(nek1,
- (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
- (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)),
+ // v indices
+ const int iv2 = iq2 % nev2;
+ const int iv3 = iq3;
+
+ ggml_vec_dot_f16(nev0,
+ (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
+ (ggml_fp16_t *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
S16);
}
} else {
const int i2 = iq2;
const int i3 = iq3;
- ggml_vec_dot_f16_unroll(nek1, nbv1,
- (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
- ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)),
+ // v indices
+ const int iv2 = iq2 % nev2;
+ const int iv3 = iq3;
+
+ ggml_vec_dot_f16_unroll(nev0, nbv1,
+ (float *) ((char *) dst->data + (ic*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
+ ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
S16);
}
}
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
- GGML_TENSOR_LOCALS(int64_t, nea, a, ne);
- GGML_TENSOR_LOCALS(size_t, nba, a, nb);
- GGML_TENSOR_LOCALS(int64_t, neb0, b0, ne);
- GGML_TENSOR_LOCALS(size_t, nbb0, b0, nb);
- GGML_TENSOR_LOCALS(int64_t, neb1, b1, ne);
- GGML_TENSOR_LOCALS(size_t, nbb1, b1, nb);
- GGML_TENSOR_LOCALS(int64_t, nec0, c0, ne);
- GGML_TENSOR_LOCALS(size_t, nbc0, c0, nb);
- GGML_TENSOR_LOCALS(int64_t, nec1, c1, ne);
- GGML_TENSOR_LOCALS(size_t, nbc1, c1, nb);
- GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
- GGML_TENSOR_LOCALS(size_t, nb, dst, nb);
+ GGML_TENSOR_LOCALS(int64_t, nea, a, ne)
+ GGML_TENSOR_LOCALS(size_t, nba, a, nb)
+ GGML_TENSOR_LOCALS(int64_t, neb0, b0, ne)
+ GGML_TENSOR_LOCALS(size_t, nbb0, b0, nb)
+ GGML_TENSOR_LOCALS(int64_t, neb1, b1, ne)
+ GGML_TENSOR_LOCALS(size_t, nbb1, b1, nb)
+ GGML_TENSOR_LOCALS(int64_t, nec0, c0, ne)
+ GGML_TENSOR_LOCALS(size_t, nbc0, c0, nb)
+ GGML_TENSOR_LOCALS(int64_t, nec1, c1, ne)
+ GGML_TENSOR_LOCALS(size_t, nbc1, c1, nb)
+ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
+ GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
const int ith = params->ith;
const int nth = params->nth;
int64_t t0 = ggml_perf_time_us();
UNUSED(t0);
- GGML_TENSOR_LOCALS(int64_t, neq, q, ne);
- GGML_TENSOR_LOCALS(size_t, nbq, q, nb);
- GGML_TENSOR_LOCALS(int64_t, nek, k, ne);
- GGML_TENSOR_LOCALS(size_t, nbk, k, nb);
- GGML_TENSOR_LOCALS(int64_t, nev, v, ne);
- GGML_TENSOR_LOCALS(size_t, nbv, v, nb);
- GGML_TENSOR_LOCALS(int64_t, ned, d, ne);
- GGML_TENSOR_LOCALS(size_t, nbd, d, nb);
- GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
- GGML_TENSOR_LOCALS(size_t, nb, dst, nb);
+ GGML_TENSOR_LOCALS(int64_t, neq, q, ne)
+ GGML_TENSOR_LOCALS(size_t, nbq, q, nb)
+ GGML_TENSOR_LOCALS(int64_t, nek, k, ne)
+ GGML_TENSOR_LOCALS(size_t, nbk, k, nb)
+ GGML_TENSOR_LOCALS(int64_t, nev, v, ne)
+ GGML_TENSOR_LOCALS(size_t, nbv, v, nb)
+ GGML_TENSOR_LOCALS(int64_t, ned, d, ne)
+ GGML_TENSOR_LOCALS(size_t, nbd, d, nb)
+ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
+ GGML_TENSOR_LOCALS(size_t, nb, dst, nb)
const int ith = params->ith;
const int nth = params->nth;
return;
}
- // parallelize by q rows using ggml_vec_dot_f32
+ const int64_t elem_q = ggml_nelements(q);
+ const int64_t elem_k = ggml_nelements(k);
- // total rows in q
- const int nr = neq2*neq3;
+ enum ggml_type result_type = dst->type;
+ GGML_ASSERT(ggml_blck_size(result_type) == 1);
+ const size_t tsize = ggml_type_size(result_type);
+
+ const size_t offs_q = 0;
+ const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
+ const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
+
+ void * grad_q = (char *) dst->data;
+ void * grad_k = (char *) dst->data + offs_k;
+ void * grad_v = (char *) dst->data + offs_v;
+
+ const size_t nbgq1 = nb0*neq0;
+ const size_t nbgq2 = nb0*neq0*neq1;
+ const size_t nbgq3 = nb0*neq0*neq1*neq2;
+
+ const size_t nbgk1 = nb0*nek0;
+ const size_t nbgk2 = nb0*nek0*nek1;
+ const size_t nbgk3 = nb0*nek0*nek1*neq2;
+
+ const size_t nbgv1 = nb0*nev0;
+ const size_t nbgv2 = nb0*nev0*nev1;
+ const size_t nbgv3 = nb0*nev0*nev1*neq2;
+
+ // parallelize by k rows using ggml_vec_dot_f32
+
+ // total rows in k
+ const int nr = nek2*nek3;
// rows per thread
const int dr = (nr + nth - 1)/nth;
//printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale);
+ // how many times k2 (and v2) is repeated in q2
+ int nrep = neq2/nek2;
+
for (int ir = ir0; ir < ir1; ++ir) {
// q indices
- const int iq3 = ir/(neq2);
- const int iq2 = ir - iq3*neq2;
- for ( int iq1 = 0; iq1 < neq1; ++iq1) {
+ const int ik3 = ir/(nek2);
+ const int ik2 = ir - ik3*nek2;
+ const int iq3 = ik3;
+ const int id3 = ik3;
+ const int iv3 = ik3;
+ const int iv2 = ik2;
- // not sure about CACHE_LINE_SIZE_F32..
- // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset?
- float * S = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM+CACHE_LINE_SIZE_F32);
- float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM+CACHE_LINE_SIZE_F32);
+ for (int irep = 0; irep < nrep; ++irep) {
+ const int iq2 = ik2 + irep*nek2;
+ const int id2 = iq2;
- for (int i = M; i < Mup; ++i) {
- S[i] = -INFINITY;
- }
+ // (ik2 + irep*nek2) % nek2 == ik2
+ for (int iq1 = 0; iq1 < neq1; ++iq1) {
+ const int id1 = iq1;
- for (int64_t ic = 0; ic < nek1; ++ic) {
- // k indices
- const int ik3 = iq3;
- const int ik2 = iq2;
- const int ik1 = ic;
+ // not sure about CACHE_LINE_SIZE_F32..
+ // - maybe it should not be multiplied by 2, and should be excluded from the SM 1*(..) offset?
+ float * S = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 0*(mxDM+CACHE_LINE_SIZE_F32);
+ float * SM = (float *) params->wdata + ith*2*(mxDM + CACHE_LINE_SIZE_F32) + 1*(mxDM+CACHE_LINE_SIZE_F32);
- // S indices
- const int i1 = ik1;
+ for (int i = M; i < Mup; ++i) {
+ S[i] = -INFINITY;
+ }
- ggml_vec_dot_f32(neq0,
- S + i1,
- (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
- (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
- }
+ const int64_t masked_begin = masked ? (P + iq1 + 1) : M;
+ for (int64_t ic = 0; ic < masked_begin; ++ic) {
+ // k indices
+ const int ik1 = ic;
- // scale
- ggml_vec_scale_f32(nek1, S, scale);
+ // S indices
+ const int i1 = ik1;
- if (masked) {
- for (int64_t i = P; i < M; i++) {
- if (i > P + iq1) {
- S[i] = -INFINITY;
- }
+ ggml_vec_dot_f32(neq0,
+ S + i1,
+ (float *) ((char *) k->data + (ik1*nbk1 + ik2*nbk2 + ik3*nbk3)),
+ (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)));
}
- }
- // softmax
- {
- float max = -INFINITY;
- ggml_vec_max_f32(M, &max, S);
+ // scale
+ ggml_vec_scale_f32(masked_begin, S, scale);
- ggml_float sum = 0.0;
+ for (int64_t i = masked_begin; i < M; i++) {
+ S[i] = -INFINITY;
+ }
+
+ // softmax
+ // exclude known -INF S[..] values from max and loop
+ // don't forget to set their SM values to zero
{
+ float max = -INFINITY;
+ ggml_vec_max_f32(masked_begin, &max, S);
+
+ ggml_float sum = 0.0;
+ {
#ifdef GGML_SOFT_MAX_ACCELERATE
- max = -max;
- vDSP_vsadd(SM, 1, &max, SM, 1, Mup);
- vvexpf(SM, SM, &Mup);
- ggml_vec_sum_f32(Mup, &sum, SM);
+ max = -max;
+ vDSP_vsadd(SM, 1, &max, SM, 1, Mup);
+ vvexpf(SM, SM, &Mup);
+ ggml_vec_sum_f32(Mup, &sum, SM);
#else
- uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
- ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
-
- for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
- float * SR = S + i;
- float * SW = SM + i;
+ uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt);
+ ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 };
- for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
- if (SR[j] == -INFINITY) {
- SW[j] = 0.0f;
- } else {
+ for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) {
+ if (i >= masked_begin) {
+ break;
+ }
+ float * SR = S + i;
+ float * SW = SM + i;
+
+ for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) {
+ if (i + j >= masked_begin) {
+ break;
+ } else if (SR[j] == -INFINITY) {
+ SW[j] = 0.0f;
+ } else {
#ifndef GGML_FLASH_ATTN_EXP_FP16
- const float val = expf(SR[j] - max);
+ const float val = expf(SR[j] - max);
#else
- ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
- memcpy(&scvt[j], &s, sizeof(uint16_t));
- const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt[j]]);
+ ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max);
+ memcpy(&scvt[j], &s, sizeof(uint16_t));
+ const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]);
#endif
- sump[j] += (ggml_float)val;
- SW[j] = val;
+ sump[j] += (ggml_float)val;
+ SW[j] = val;
+ }
}
}
- }
- for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
- sum += sump[i];
- }
+ for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) {
+ sum += sump[i];
+ }
#endif
- }
-
- assert(sum > 0.0);
-
- sum = 1.0/sum;
- ggml_vec_scale_f32(M, SM, sum);
-
- }
-
- // step-by-step explanation
- {
- // forward-process shape grads from backward process
- // parallel_for iq2,iq3:
- // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,iq2,iq3] += grad[kcur]
- // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur]
- // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iq2,iq3] += grad[vcur]
- // for iq1:
- // kcur = k[:D,:M,iq2,iq3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur
- // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur
- // vcur = v[:M,:D,iq2,iq3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4
- // S0 = -Inf [D,1,1,1]
- // ~S1[i] = dot(kcur[:D,i], qcur)
- // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale
- // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P)
- // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
- // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur
- // ~S5[i] = dot(vcur[:,i], S4)
- // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,iq1,iq2,iq3]
- // ~dst[i,iq1,iq2,iq3] = S5[i] ^
- // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,iq1,iq2,iq3]
- // dst backward-/ grad[dst] = d
- //
- // output gradients with their dependencies:
- //
- // grad[kcur] = grad[S1].T @ qcur
- // grad[S1] = diag_mask_zero(grad[S3], P) * scale
- // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
- // grad[S4] = grad[S5] @ vcur
- // grad[S4] = d[:D,iq1,iq2,iq3] @ vcur
- // grad[qcur] = grad[S1] @ kcur
- // grad[vcur] = grad[S5].T @ S4
- // grad[vcur] = d[:D,iq1,iq2,iq3].T @ S4
- //
- // in post-order:
- //
- // S1 = qcur @ kcur.T
- // S2 = S1 * scale
- // S3 = diag_mask_inf(S2, P)
- // S4 = softmax(S3)
- // grad[S4] = d[:D,iq1,iq2,iq3] @ vcur
- // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
- // grad[S1] = diag_mask_zero(grad[S3], P) * scale
- // grad[qcur] = grad[S1] @ kcur
- // grad[kcur] = grad[S1].T @ qcur
- // grad[vcur] = d[:D,iq1,iq2,iq3].T @ S4
- //
- // using less variables (SM=S4):
- //
- // S = diag_mask_inf(qcur @ kcur.T * scale, P)
- // SM = softmax(S)
- // S = d[:D,iq1,iq2,iq3] @ vcur
- // dot_SM_gradSM = dot(SM, S)
- // S = SM * (S - dot(SM, S))
- // S = diag_mask_zero(S, P) * scale
- //
- // grad[q][:D,iq1,iq2,iq3] += S @ kcur
- // grad[k][:D,:M,iq2,iq3] += S.T @ qcur
- // grad[v][:M,:D,iq2,iq3] += d[:D,iq1,iq2,iq3].T @ SM
- }
-
- // S = gradSM = d[:D,iq1,iq2,iq3] @ vcur
- // S = d[:D,iq1,iq2,iq3] @ vcur
- // S[:M] += vcur[:M,ic] * d[ic,iq1,iq2,iq3]
- ggml_vec_set_f32(M, S, 0);
- for (int64_t ic = 0; ic < D; ++ic) {
- // dst indices
- const int i1 = iq1;
- const int i2 = iq2;
- const int i3 = iq3;
+ }
- ggml_vec_mad_f32(M,
- S,
- (float *) ((char *) v->data + ( ic*nbv1 + i2*nbv2 + i3*nbv3)),
- *(float *) ((char *) d->data + (ic*nbd0 + i1*nbd1 + i2*nbd2 + i3*nbd3)));
- }
+ assert(sum > 0.0);
- // S = SM * (S - dot(SM, S))
- float dot_SM_gradSM = 0;
- ggml_vec_dot_f32 (M, &dot_SM_gradSM, SM, S);
- ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
- ggml_vec_mul_f32 (M, S, S, SM);
+ sum = 1.0/sum;
+ ggml_vec_scale_f32(masked_begin, SM, sum);
- // S = diag_mask_zero(S, P) * scale
- if (masked) {
- // for (int64_t i = P + iq1 + 1; i < M; i++) {
- // S[i] = 0;
- // }
- for (int64_t i = P; i < M; i++) {
- if (i > P + iq1) {
- S[i] = 0;
- }
}
- }
- ggml_vec_scale_f32(M, S, scale);
-
- void * grad_q = (char *) dst->data;
- void * grad_k = (char *) dst->data + nb0*D*N*neq2*neq3;
- void * grad_v = (char *) dst->data + nb0*D*N*neq2*neq3 + nb0*D*M*neq2*neq3;
-
- const size_t nbgq1 = nb0*neq0;
- const size_t nbgq2 = nb0*neq0*neq1;
- const size_t nbgq3 = nb0*neq0*neq1*neq2;
-
- const size_t nbgk1 = nb0*nek0;
- const size_t nbgk2 = nb0*nek0*nek1;
- const size_t nbgk3 = nb0*nek0*nek1*neq2;
-
- const size_t nbgv1 = nb0*nev0;
- const size_t nbgv2 = nb0*nev0*nev1;
- const size_t nbgv3 = nb0*nev0*nev1*neq2;
-
- // S shape [M,1]
- // SM shape [M,1]
- // kcur shape [D,M]
- // qcur shape [D,1]
- // vcur shape [M,D]
- //
- // grad[q][:D,iq1,iq2,iq3] += S @ kcur
- // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
- // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic]
- //
- //// grad[q][ic,iq1,iq2,iq3] += dot(kcur[:,ic],S.T)
- //// grad[q][ic,iq1,iq2,iq3] += dot(k[:D,ic,iq2,iq3],S.T)
- for (int64_t ic = 0; ic < M; ++ic) {
- // dst indices
- const int i1 = iq1;
- const int i2 = iq2;
- const int i3 = iq3;
- ggml_vec_mad_f32(D,
- (float *) ((char *) grad_q + (i1*nbgq1 + i2*nbgq2 + i3*nbgq3)),
- (float *) ((char *) k->data + (ic*nbk1 + i2*nbk2 + i3*nbk3)),
- S[ic]);
- }
+ // step-by-step explanation
+ {
+ // forward-process shape grads from backward process
+ // parallel_for ik2,ik3:
+ // for irep:
+ // iq2 = ik2 + irep*nek2
+ // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur]
+ // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur]
+ // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur]
+ // for iq1:
+ // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur
+ // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur
+ // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4
+ // S0 = -Inf [D,1,1,1]
+ // ~S1[i] = dot(kcur[:D,i], qcur)
+ // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale
+ // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P)
+ // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
+ // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur
+ // ~S5[i] = dot(vcur[:,i], S4)
+ // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3]
+ // ~dst[i,iq1,iq2,iq3] = S5[i] ^
+ // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3]
+ // dst backward-/ grad[dst] = d
+ //
+ // output gradients with their dependencies:
+ //
+ // grad[kcur] = grad[S1].T @ qcur
+ // grad[S1] = diag_mask_zero(grad[S3], P) * scale
+ // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
+ // grad[S4] = grad[S5] @ vcur
+ // grad[S4] = d[:D,id1,id2,id3] @ vcur
+ // grad[qcur] = grad[S1] @ kcur
+ // grad[vcur] = grad[S5].T @ S4
+ // grad[vcur] = d[:D,id1,id2,id3].T @ S4
+ //
+ // in post-order:
+ //
+ // S1 = qcur @ kcur.T
+ // S2 = S1 * scale
+ // S3 = diag_mask_inf(S2, P)
+ // S4 = softmax(S3)
+ // grad[S4] = d[:D,id1,id2,id3] @ vcur
+ // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4]))
+ // grad[S1] = diag_mask_zero(grad[S3], P) * scale
+ // grad[qcur] = grad[S1] @ kcur
+ // grad[kcur] = grad[S1].T @ qcur
+ // grad[vcur] = d[:D,id1,id2,id3].T @ S4
+ //
+ // using less variables (SM=S4):
+ //
+ // S = diag_mask_inf(qcur @ kcur.T * scale, P)
+ // SM = softmax(S)
+ // S = d[:D,iq1,iq2,iq3] @ vcur
+ // dot_SM_gradSM = dot(SM, S)
+ // S = SM * (S - dot(SM, S))
+ // S = diag_mask_zero(S, P) * scale
+ //
+ // grad[q][:D,iq1,iq2,iq3] += S @ kcur
+ // grad[k][:D,:M,ik2,ik3] += S.T @ qcur
+ // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
+ }
- // grad[k][:D,:M,iq2,iq3] += S.T @ qcur
- // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0]
- // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0]
- for (int64_t ic = 0; ic < M; ++ic) {
- // dst indices
- const int i1 = iq1;
- const int i2 = iq2;
- const int i3 = iq3;
+ // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
+ // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3]
+ // for ic:
+ // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3]
+ // exclude known future zero S[..] values from operation
+ ggml_vec_set_f32(masked_begin, S, 0);
+ for (int64_t ic = 0; ic < D; ++ic) {
+ ggml_vec_mad_f32(masked_begin,
+ S,
+ (float *) ((char *) v->data + ( ic*nbv1 + iv2*nbv2 + iv3*nbv3)),
+ *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
+ }
- // ggml_vec_set_f32(D,
- // (float *) ((char *) grad_k + (ic*nbgk1 + i2*nbgk2 + i3*nbgk3)),
- // 0);
- ggml_vec_mad_f32(D,
- (float *) ((char *) grad_k + (ic*nbgk1 + i2*nbgk2 + i3*nbgk3)),
- (float *) ((char *) q->data + (i1*nbq1 + i2*nbq2 + i3*nbq3)),
- S[ic]);
- }
+ // S = SM * (S - dot(SM, S))
+ float dot_SM_gradSM = 0;
+ ggml_vec_dot_f32 (masked_begin, &dot_SM_gradSM, SM, S);
+ ggml_vec_acc1_f32(M, S, -dot_SM_gradSM);
+ ggml_vec_mul_f32 (masked_begin, S, S, SM);
+
+ // S = diag_mask_zero(S, P) * scale
+ // already done by above ggml_vec_set_f32
+
+ // exclude known zero S[..] values from operation
+ ggml_vec_scale_f32(masked_begin, S, scale);
+
+ // S shape [M,1]
+ // SM shape [M,1]
+ // kcur shape [D,M]
+ // qcur shape [D,1]
+ // vcur shape [M,D]
+
+ // grad[q][:D,iq1,iq2,iq3] += S @ kcur
+ // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M]
+ // for ic:
+ // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3]
+ // exclude known zero S[..] values from loop
+ for (int64_t ic = 0; ic < masked_begin; ++ic) {
+ ggml_vec_mad_f32(D,
+ (float *) ((char *) grad_q + (iq1*nbgq1 + iq2*nbgq2 + iq3*nbgq3)),
+ (float *) ((char *) k->data + (ic*nbk1 + ik2*nbk2 + ik3*nbk3)),
+ S[ic]);
+ }
- // grad[v][:M,:D,iq2,iq3] += d[:D,iq1,iq2,iq3].T @ SM
- // grad[v][:M,ic,iq2,iq3] += d[:D,iq1,iq2,iq3].T[0,ic] * SM[:M]
- // grad[v][:M,ic,iq2,iq3] += d[ic,iq1,iq2,iq3] * SM[:M]
- for (int64_t ic = 0; ic < D; ++ic) {
- // dst indices
- const int i1 = iq1;
- const int i2 = iq2;
- const int i3 = iq3;
+ // grad[k][:D,:M,ik2,ik3] += S.T @ qcur
+ // for ic:
+ // grad[k][:D,ic,ik2,ik3] += S.T[0,ic] * qcur[:D,0]
+ // grad[k][:D,ic,ik2,ik3] += S[ic] * qcur[:D,0]
+ // exclude known zero S[..] values from loop
+ for (int64_t ic = 0; ic < masked_begin; ++ic) {
+ ggml_vec_mad_f32(D,
+ (float *) ((char *) grad_k + (ic*nbgk1 + ik2*nbgk2 + ik3*nbgk3)),
+ (float *) ((char *) q->data + (iq1*nbq1 + iq2*nbq2 + iq3*nbq3)),
+ S[ic]);
+ }
- // ggml_vec_set_f32(M,
- // (float *) ((char *) grad_v + ( ic*nbgv1 + i2*nbgv2 + i3*nbgv3)),
- // 0);
- ggml_vec_mad_f32(M,
- (float *) ((char *) grad_v + ( ic*nbgv1 + i2*nbgv2 + i3*nbgv3)),
- SM,
- *(float *) ((char *) d->data + (ic*nbd0 + i1*nbd1 + i2*nbd2 + i3*nbd3)));
+ // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM
+ // for ic:
+ // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M]
+ // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M]
+ // exclude known zero SM[..] values from mad
+ for (int64_t ic = 0; ic < D; ++ic) {
+ ggml_vec_mad_f32(masked_begin,
+ (float *) ((char *) grad_v + ( ic*nbgv1 + iv2*nbgv2 + iv3*nbgv3)),
+ SM,
+ *(float *) ((char *) d->data + (ic*nbd0 + id1*nbd1 + id2*nbd2 + id3*nbd3)));
+ }
}
}
}
return;
}
- GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
- GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
+ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
+ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
const int32_t nep0 = ((const int32_t *)(dst->op_params))[0];
const int32_t nep1 = ((const int32_t *)(dst->op_params))[1];
return;
}
- GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
- GGML_TENSOR_LOCALS(int64_t, ne, dst, ne);
+ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne)
+ GGML_TENSOR_LOCALS(int64_t, ne, dst, ne)
const int32_t w = ((const int32_t *)(dst->op_params))[0];
{
ggml_compute_forward_silu(params, src0, dst);
} break;
+ case GGML_UNARY_OP_LEAKY:
+ {
+ ggml_compute_forward_leaky(params, src0, dst);
+ } break;
default:
{
GGML_ASSERT(false);
// ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322
- GGML_TENSOR_UNARY_OP_LOCALS;
+ GGML_TENSOR_UNARY_OP_LOCALS
const int64_t w = ne1;
const int ip0 = dp*ith;
const int ip1 = MIN(ip0 + dp, np);
-
for (int64_t i13 = ip0; i13 < ip1; ++i13) {
for (int64_t i12 = 0; i12 < ne12; ++i12) {
for (int64_t i11 = 0; i11 < ne11; ++i11) {
}
}
-
static void ggml_compute_forward_map_unary(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
}
}
-
static void ggml_compute_forward_map_binary(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
fun(dst, a, b);
}
-
// ggml_compute_forward_map_custom3
static void ggml_compute_forward_map_custom3_f32(
#else
ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
memcpy(&scvt, &s, sizeof(scvt));
- const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
+ const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
#endif
sum += (ggml_float)val;
st[i] = val;
#else
ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max);
memcpy(&scvt, &s, sizeof(scvt));
- const float val = GGML_FP16_TO_FP32(table_exp_f16[scvt]);
+ const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]);
#endif
sum += (ggml_float)val;
ds0[i] = val;
ggml_vec_sub_f32(nc, ds0, ds0, s1);
ggml_vec_scale_f32(nc, ds0, d[0] / (float) nr);
-
#ifndef NDEBUG
for (int i = 0; i < nc; ++i) {
assert(!isnan(ds0[i]));
}
}
-
/////////////////////////////////
static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
GGML_ASSERT(params);
+ if (tensor->op == GGML_OP_NONE) {
+ return;
+ }
+
#ifdef GGML_USE_CUBLAS
bool skip_cpu = ggml_cuda_compute_forward(params, tensor);
if (skip_cpu) {
} break;
case GGML_OP_GET_ROWS_BACK:
{
- ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
+ ggml_compute_forward_get_rows_back(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_DIAG:
{
} break;
case GGML_OP_ROPE:
{
- ggml_compute_forward_rope(params, tensor->src[0], tensor);
+ ggml_compute_forward_rope(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_ROPE_BACK:
{
- ggml_compute_forward_rope_back(params, tensor->src[0], tensor);
+ ggml_compute_forward_rope_back(params, tensor->src[0], tensor->src[1], tensor);
} break;
case GGML_OP_ALIBI:
{
{
ggml_compute_forward_conv_1d(params, tensor->src[0], tensor->src[1], tensor);
} break;
+ case GGML_OP_CONV_1D_STAGE_0:
+ {
+ ggml_compute_forward_conv_1d_stage_0(params, tensor->src[0], tensor->src[1], tensor);
+ } break;
+ case GGML_OP_CONV_1D_STAGE_1:
+ {
+ ggml_compute_forward_conv_1d_stage_1(params, tensor->src[0], tensor->src[1], tensor);
+ } break;
+ case GGML_OP_CONV_TRANSPOSE_1D:
+ {
+ ggml_compute_forward_conv_transpose_1d(params, tensor->src[0], tensor->src[1], tensor);
+ } break;
case GGML_OP_CONV_2D:
{
ggml_compute_forward_conv_2d(params, tensor->src[0], tensor->src[1], tensor);
} break;
+ case GGML_OP_CONV_2D_STAGE_0:
+ {
+ ggml_compute_forward_conv_2d_stage_0(params, tensor->src[0], tensor->src[1], tensor);
+ } break;
+ case GGML_OP_CONV_2D_STAGE_1:
+ {
+ ggml_compute_forward_conv_2d_stage_1(params, tensor->src[0], tensor->src[1], tensor);
+ } break;
case GGML_OP_CONV_TRANSPOSE_2D:
{
ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor);
////////////////////////////////////////////////////////////////////////////////
-static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, bool inplace) {
+static size_t ggml_hash_size(size_t min_sz) {
+ // next primes after powers of two
+ static const size_t primes[] = {
+ 2, 3, 5, 11, 17, 37, 67, 131, 257, 521, 1031,
+ 2053, 4099, 8209, 16411, 32771, 65537, 131101,
+ 262147, 524309, 1048583, 2097169, 4194319, 8388617,
+ 16777259, 33554467, 67108879, 134217757, 268435459,
+ 536870923, 1073741827, 2147483659
+ };
+ static const size_t n_primes = sizeof(primes)/sizeof(primes[0]);
+
+ // find the smallest prime that is larger or equal to min_sz
+ size_t l = 0;
+ size_t r = n_primes;
+ while (l < r) {
+ size_t m = (l + r)/2;
+ if (primes[m] < min_sz) {
+ l = m + 1;
+ } else {
+ r = m;
+ }
+ }
+ size_t sz = l < n_primes ? primes[l] : min_sz | 1;
+ return sz;
+}
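+
+// examples: ggml_hash_size(100) == 131 and ggml_hash_size(132) == 257 (the smallest
+// listed prime >= min_sz); past the end of the table it falls back to min_sz | 1,
+// which only forces an odd size rather than a true prime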
+
+static size_t ggml_hash(const void * p) {
+ return (size_t)p;
+}
+
+size_t ggml_hash_find(const struct ggml_hash_set hash_set, struct ggml_tensor * key) {
+ size_t h = ggml_hash(key) % hash_set.size;
+
+ // linear probing
+ size_t i = h;
+ while (hash_set.keys[i] != NULL && hash_set.keys[i] != key) {
+ i = (i + 1) % hash_set.size;
+ if (i == h) {
+ // visited all hash table entries -> not found
+ return GGML_HASHTABLE_FULL;
+ }
+ }
+ return i;
+}
+
+bool ggml_hash_contains(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
+ size_t i = ggml_hash_find(hash_set, key);
+ return i != GGML_HASHTABLE_FULL && hash_set.keys[i] == key;
+}
+
+size_t ggml_hash_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
+ size_t i = ggml_hash_find(hash_set, key);
+
+ GGML_ASSERT(i != GGML_HASHTABLE_FULL);
+
+ if (hash_set.keys[i] == key) {
+ return GGML_HASHTABLE_ALREADY_EXISTS;
+ }
+
+ // insert
+ GGML_ASSERT(hash_set.keys[i] == NULL);
+ hash_set.keys[i] = key;
+ return i;
+}
+
+size_t ggml_hash_find_or_insert(struct ggml_hash_set hash_set, struct ggml_tensor * key) {
+ size_t i = ggml_hash_find(hash_set, key);
+
+ GGML_ASSERT(i != GGML_HASHTABLE_FULL);
+
+ hash_set.keys[i] = key;
+ return i;
+}
+
+static struct ggml_hash_set ggml_hash_set_new(size_t size) {
+ size = ggml_hash_size(size);
+ struct ggml_hash_set result;
+ result.size = size;
+ result.keys = malloc(sizeof(struct ggml_tensor *) * size);
+ memset(result.keys, 0, sizeof(struct ggml_tensor *) * size);
+ return result;
+}
+
+static void ggml_hash_set_free(struct ggml_hash_set hash_set) {
+ free(hash_set.keys);
+}
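+
+// usage sketch for the open-addressing set above (hypothetical tensors t0/t1,
+// commented pseudo-code):
+//
+//   struct ggml_hash_set set = ggml_hash_set_new(2); // size is rounded up to a prime
+//   ggml_hash_insert  (set, t0);                     // returns the slot index
+//   ggml_hash_contains(set, t0);                     // -> true
+//   ggml_hash_contains(set, t1);                     // -> false
+//   ggml_hash_set_free(set);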
+
+struct hash_map {
+ struct ggml_hash_set set;
+ struct ggml_tensor ** vals;
+};
+
+static struct hash_map * ggml_new_hash_map(size_t size) {
+ struct hash_map * result = malloc(sizeof(struct hash_map));
+ result->set = ggml_hash_set_new(size);
+ result->vals = malloc(sizeof(struct ggml_tensor *) * result->set.size);
+ memset(result->vals, 0, sizeof(struct ggml_tensor *) * result->set.size);
+ return result;
+}
+
+static void ggml_hash_map_free(struct hash_map * map) {
+ ggml_hash_set_free(map->set);
+ free(map->vals);
+ free(map);
+}
+
+// gradient checkpointing
+
+static struct ggml_tensor * ggml_recompute_graph_node(
+ struct ggml_context * ctx,
+ struct ggml_cgraph * graph,
+ struct hash_map * replacements,
+ struct ggml_tensor * node) {
+
+ if (node == NULL) {
+ return NULL;
+ }
+
+ if (node->is_param) {
+ return node;
+ }
+
+ if (!ggml_hash_contains(graph->visited_hash_table, node)) {
+ return node;
+ }
+
+ int count_children = 0;
+ for (int k = 0; k < GGML_MAX_SRC; ++k) {
+ if (node->src[k]) {
+ ++count_children;
+ }
+ }
+
+ if (count_children == 0) {
+ return node;
+ }
+
+ size_t i = ggml_hash_find(replacements->set, node);
+ GGML_ASSERT(i != GGML_HASHTABLE_FULL); // assert that not full
+ if (replacements->set.keys[i] == node) {
+ return replacements->vals[i];
+ }
+
+ struct ggml_tensor * clone = ggml_new_tensor(ctx, node->type, node->n_dims, node->ne);
+
+ // insert clone into replacements
+ GGML_ASSERT(replacements->set.keys[i] == NULL); // assert that we don't overwrite
+ replacements->set.keys[i] = node;
+ replacements->vals[i] = clone;
+
+ clone->op = node->op;
+ clone->grad = node->grad;
+ clone->is_param = node->is_param;
+ clone->extra = node->extra;
+ for (int k = 0; k < GGML_MAX_DIMS; ++k) {
+ clone->nb[k] = node->nb[k];
+ }
+ for (int k = 0; k < GGML_MAX_SRC; ++k) {
+ clone->src[k] = ggml_recompute_graph_node(ctx, graph, replacements, node->src[k]);
+ }
+ if (node->view_src != NULL) {
+ clone->data = (node->view_src->data == NULL)
+ ? NULL // view_src not yet allocated
+ : (char *) node->view_src->data // view_src already allocated
+ + node->view_offs;
+ clone->view_src = node->view_src;
+ clone->view_offs = node->view_offs;
+ }
+
+ GGML_ASSERT(sizeof(node->op_params) == sizeof(int32_t) * (GGML_MAX_OP_PARAMS / sizeof(int32_t)));
+ GGML_ASSERT(sizeof(node->name) == GGML_MAX_NAME);
+ memcpy(clone->op_params, node->op_params, sizeof(node->op_params));
+ ggml_format_name(clone, "%s (clone)", ggml_get_name(node));
+
+ return clone;
+}
+
+void ggml_build_backward_gradient_checkpointing(
+ struct ggml_context * ctx,
+ struct ggml_cgraph * gf,
+ struct ggml_cgraph * gb,
+ struct ggml_cgraph * gb_tmp,
+ struct ggml_tensor * * checkpoints,
+ int n_checkpoints) {
+ ggml_graph_cpy(gf, gb_tmp);
+ ggml_build_backward_expand(ctx, gf, gb_tmp, true);
+
+ if (n_checkpoints <= 0) {
+ ggml_graph_cpy(gb_tmp, gb);
+ return;
+ }
+
+ struct hash_map * replacements = ggml_new_hash_map(gf->n_nodes + gf->n_leafs + n_checkpoints);
+
+ // insert checkpoints in replacements
+ for (int i = 0; i < n_checkpoints; ++i) {
+ size_t k = ggml_hash_find(replacements->set, checkpoints[i]);
+ GGML_ASSERT(k != GGML_HASHTABLE_FULL); // assert that not full
+ GGML_ASSERT(replacements->set.keys[k] == NULL); // assert that we don't overwrite
+ replacements->set.keys[k] = checkpoints[i];
+ replacements->vals[k] = checkpoints[i];
+ }
+
+ ggml_graph_cpy(gf, gb);
+ // rewrite gb_tmp->nodes[gf->n_nodes:gb_tmp->n_nodes],
+ // replacing references to gb_tmp->nodes[0:gf->n_nodes] ( == gf->nodes[0:gf->n_nodes]),
+ // by recomputing them from checkpoints
+ for (int i = gf->n_nodes; i<gb_tmp->n_nodes; ++i) {
+ struct ggml_tensor * node = gb_tmp->nodes[i];
+ for (int k = 0; k < GGML_MAX_SRC; ++k) {
+ // insert new tensors recomputing src, reusing already made replacements,
+ // remember replacements: map each corresponding gf node to its newly created tensor
+ // recurse for input tensors,
+ // unless (i.e. terminating when) input tensors are replacements (like checkpoints)
+ node->src[k] = ggml_recompute_graph_node(ctx, gf, replacements, node->src[k]);
+ }
+ // insert rewritten backward node with replacements made into resulting backward graph gb
+ ggml_build_forward_expand(gb, node);
+ }
+
+ ggml_hash_map_free(replacements);
+}
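+
+// call sketch (hypothetical graphs and checkpoint tensors; gb_tmp is scratch):
+//
+//   struct ggml_tensor * checkpoints[2] = { t_embd, t_layer_out };
+//   ggml_build_backward_gradient_checkpointing(ctx, gf, gb, gb_tmp, checkpoints, 2);
+//
+// afterwards gb holds the backward graph in which intermediate activations between
+// checkpoints are recomputed instead of kept alive from the forward pass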
+
+// functions to change gradients, handling the case where input a may be the initial zero-valued gradient
+
+static struct ggml_tensor * ggml_add_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
+ if (ggml_hash_contains(zero_table, a)) {
+ return b;
+ } else {
+ return ggml_add_impl(ctx, a, b, false);
+ }
+}
+
+static struct ggml_tensor * ggml_acc_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, size_t nb1, size_t nb2, size_t nb3, size_t offset, struct ggml_hash_set zero_table) {
+ if (ggml_hash_contains(zero_table, a)) {
+ struct ggml_tensor * a_zero = ggml_scale(ctx, a, ggml_new_f32(ctx, 0));
+ return ggml_acc_impl(ctx, a_zero, b, nb1, nb2, nb3, offset, false);
+ } else {
+ return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
+ }
+}
+
+static struct ggml_tensor * ggml_add1_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
+ if (ggml_hash_contains(zero_table, a)) {
+ return ggml_repeat(ctx, b, a);
+ } else {
+ return ggml_add1_impl(ctx, a, b, false);
+ }
+}
+
+static struct ggml_tensor * ggml_sub_or_set(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, struct ggml_hash_set zero_table) {
+ if (ggml_hash_contains(zero_table, a)) {
+ return ggml_neg(ctx, b);
+ } else {
+ return ggml_sub_impl(ctx, a, b, false);
+ }
+}
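+
+// e.g. while gradient a is still the known-zero tensor recorded in zero_table,
+// the first accumulation can skip the arithmetic entirely:
+//
+//   ggml_add_or_set (ctx, a, b, zero_table)  // -> b         (a + b == b)
+//   ggml_sub_or_set (ctx, a, b, zero_table)  // -> neg(b)    (a - b == -b)
+//   ggml_add1_or_set(ctx, a, b, zero_table)  // -> repeat(b, a)
+//
+// ggml_acc_or_set still needs an explicitly zeroed a (hence the scale by 0),
+// since acc writes b into a sub-view of a and keeps the rest of a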
+
+static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor * tensor, struct ggml_hash_set zero_table) {
struct ggml_tensor * src0 = tensor->src[0];
struct ggml_tensor * src1 = tensor->src[1];
case GGML_OP_DUP:
{
if (src0->grad) {
- src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
+ src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
}
} break;
case GGML_OP_ADD:
{
if (src0->grad) {
- src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
+ src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
}
if (src1->grad) {
- src1->grad = ggml_add_impl(ctx, src1->grad, tensor->grad, inplace);
+ src1->grad = ggml_add_or_set(ctx, src1->grad, tensor->grad, zero_table);
}
} break;
case GGML_OP_ADD1:
{
if (src0->grad) {
- src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
+ src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
}
if (src1->grad) {
- src1->grad = ggml_add_impl(ctx,
+ src1->grad = ggml_add_or_set(ctx,
src1->grad,
ggml_mean(ctx, tensor->grad), // TODO: should probably be sum instead of mean
- inplace);
+ zero_table);
}
} break;
case GGML_OP_ACC:
{
if (src0->grad) {
- src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
+ src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
}
if (src1->grad) {
const size_t nb1 = ((int32_t *) tensor->op_params)[0];
nb1, nb2, nb3, offset);
src1->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src1->grad,
ggml_reshape(ctx,
ggml_cont(ctx, tensor_grad_view),
src1->grad),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_SUB:
{
if (src0->grad) {
- src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
+ src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
}
if (src1->grad) {
- src1->grad = ggml_sub_impl(ctx, src1->grad, tensor->grad, inplace);
+ src1->grad = ggml_sub_or_set(ctx, src1->grad, tensor->grad, zero_table);
}
} break;
case GGML_OP_MUL:
{
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src0->grad,
ggml_mul(ctx, src1, tensor->grad),
- inplace);
+ zero_table);
}
if (src1->grad) {
src1->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src1->grad,
ggml_mul(ctx, src0, tensor->grad),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_DIV:
{
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src0->grad,
ggml_div(ctx, tensor->grad, src1),
- inplace);
+ zero_table);
}
if (src1->grad) {
src1->grad =
- ggml_sub_impl(ctx,
+ ggml_sub_or_set(ctx,
src1->grad,
ggml_mul(ctx,
tensor->grad,
ggml_div(ctx, tensor, src1)),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_SQR:
{
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src0->grad,
ggml_scale(ctx,
ggml_mul(ctx, src0, tensor->grad),
ggml_new_f32(ctx, 2.0f)),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_SQRT:
{
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src0->grad,
ggml_scale(ctx,
ggml_div(ctx,
tensor->grad,
tensor),
ggml_new_f32(ctx, 0.5f)),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_LOG:
{
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src0->grad,
ggml_div(ctx,
tensor->grad,
src0),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_SUM:
{
if (src0->grad) {
src0->grad =
- ggml_add1_impl(ctx,
+ ggml_add1_or_set(ctx,
src0->grad,
tensor->grad,
- inplace);
+ zero_table);
}
} break;
case GGML_OP_SUM_ROWS:
{
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src0->grad,
ggml_repeat(ctx,
tensor->grad,
src0->grad),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_MEAN:
{
// necessary for llama
if (src0->grad) {
- src0->grad = ggml_add_impl(ctx,
+ src0->grad = ggml_add_or_set(ctx,
src0->grad,
ggml_repeat_back(ctx, tensor->grad, src0->grad),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_REPEAT_BACK:
{
if (src0->grad) {
// TODO: test this
- src0->grad = ggml_add_impl(ctx,
+ src0->grad = ggml_add_or_set(ctx,
src0->grad,
ggml_repeat(ctx, tensor->grad, src0->grad),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_CONCAT:
float eps;
memcpy(&eps, tensor->op_params, sizeof(float));
- src0->grad = ggml_add_impl(ctx,
+ src0->grad = ggml_add_or_set(ctx,
src0->grad,
ggml_rms_norm_back(ctx, src0, tensor->grad, eps),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_RMS_NORM_BACK:
// ds0 = dt.dot(s1.T) #.T gives the transpose of the matrix
// ds1 = t.T.dot(dt)
- // tensor.shape [m,p]
- // src0.shape [n,m]
- // src1.shape [n,p]
+ // tensor.shape [m,p,qq,rr]
+ // src0.shape [n,m,q1,r1]
+ // src1.shape [n,p,qq,rr]
// necessary for llama
if (src0->grad) {
+ struct ggml_tensor * s1_tg =
+ ggml_out_prod(ctx, // [n,m,qq,rr]
+ src1, // [n,p,qq,rr]
+ tensor->grad); // [m,p,qq,rr]
+ const int64_t qq = s1_tg->ne[2];
+ const int64_t rr = s1_tg->ne[3];
+ const int64_t q1 = src0->ne[2];
+ const int64_t r1 = src0->ne[3];
+ const bool ne2_broadcasted = qq > q1;
+ const bool ne3_broadcasted = rr > r1;
+ if (ne2_broadcasted || ne3_broadcasted) {
+ // sum broadcast repetitions of s1_tg into shape of src0
+ s1_tg = ggml_repeat_back(ctx, s1_tg, src0);
+ }
src0->grad =
- ggml_add_impl(ctx,
- src0->grad,
- ggml_out_prod(ctx, // [n,m]
- src1, // [n,p]
- tensor->grad), // [m,p]
- inplace);
+ ggml_add_or_set(ctx,
+ src0->grad, // [n,m,q1,r1]
+ s1_tg, // [n,m,q1,r1]
+ zero_table);
}
if (src1->grad) {
src1->grad =
- ggml_add_impl(ctx,
- src1->grad,
- // ggml_mul_mat(ctx, // [n,p]
- // ggml_cont(ctx, // [m,n]
- // ggml_transpose(ctx, src0)), // [m,n]
- // tensor->grad), // [m,p]
+ ggml_add_or_set(ctx,
+ src1->grad, // [n,p,qq,rr]
+ // ggml_mul_mat(ctx, // [n,p,qq,rr]
+ // ggml_cont(ctx, // [m,n,q1,r1]
+ // ggml_transpose(ctx, src0)), // [m,n,q1,r1]
+ // tensor->grad), // [m,p,qq,rr]
// // when src0 is bigger than tensor->grad (this is mostly the case in llama),
// // avoid transpose of src0, rather transpose smaller tensor->grad
// // and then use ggml_out_prod
- ggml_out_prod(ctx, // [n,p]
- src0, // [n,m]
- ggml_transpose(ctx, // [p,m]
- tensor->grad)), // [m,p]
- inplace);
+ ggml_out_prod(ctx, // [n,p,qq,rr]
+ src0, // [n,m,q1,r1]
+ ggml_transpose(ctx, // [p,m,qq,rr]
+ tensor->grad)), // [m,p,qq,rr]
+ zero_table);
}
} break;
case GGML_OP_OUT_PROD:
// necessary for llama
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src0->grad,
ggml_scale_impl(ctx, tensor->grad, src1, false),
- inplace);
+ zero_table);
}
if (src1->grad) {
src1->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src1->grad,
ggml_sum(ctx, ggml_mul_impl(ctx, tensor->grad, src0, false)),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_SET:
}
if (src0->grad) {
- src0->grad = ggml_add_impl(ctx,
+ src0->grad = ggml_add_or_set(ctx,
src0->grad,
ggml_acc_impl(ctx,
tensor->grad,
ggml_neg(ctx, tensor_grad_view),
nb1, nb2, nb3, offset, false),
- inplace);
+ zero_table);
}
if (src1->grad) {
src1->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src1->grad,
ggml_reshape(ctx,
ggml_cont(ctx, tensor_grad_view),
src1->grad),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_CPY:
// tensor = src0 * 1 + src1 * 0
if (src0->grad) {
// dsrc0 = dtensor * 1
- src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
+ src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
}
if (src1->grad) {
// dsrc1 = dtensor * 0 -> noop
if (src0->grad) {
GGML_ASSERT(ggml_is_contiguous(src0->grad));
GGML_ASSERT(ggml_is_contiguous(tensor->grad));
- src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
+ src0->grad = ggml_add_or_set(ctx, src0->grad, tensor->grad, zero_table);
}
} break;
case GGML_OP_RESHAPE:
// necessary for llama
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx, src0->grad,
- ggml_reshape(ctx, tensor->grad, src0->grad),
- inplace);
+ ggml_add_or_set(ctx, src0->grad,
+ ggml_reshape(ctx,
+ ggml_is_contiguous(tensor->grad)
+ ? tensor->grad
+ : ggml_cont(ctx, tensor->grad),
+ src0->grad),
+ zero_table);
}
} break;
case GGML_OP_VIEW:
nb3 = (nb3 / n0) * ng;
}
- src0->grad = ggml_acc_impl(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, inplace);
+ src0->grad = ggml_acc_or_set(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, zero_table);
}
} break;
case GGML_OP_PERMUTE:
axes_backward[axis2] = 2;
axes_backward[axis3] = 3;
src0->grad =
- ggml_add_impl(ctx, src0->grad,
+ ggml_add_or_set(ctx, src0->grad,
ggml_permute(ctx,
tensor->grad,
axes_backward[0],
axes_backward[1],
axes_backward[2],
axes_backward[3]),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_TRANSPOSE:
// necessary for llama
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx, src0->grad,
+ ggml_add_or_set(ctx, src0->grad,
ggml_transpose(ctx, tensor->grad),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_GET_ROWS:
// necessary for llama (only for tokenizer)
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx, src0->grad,
+ ggml_add_or_set(ctx, src0->grad,
+ // last ggml_get_rows_back argument src0->grad is only
+ // necessary to set up the correct output shape
ggml_get_rows_back(ctx, tensor->grad, src1, src0->grad),
- inplace);
+ zero_table);
}
if (src1->grad) {
// noop
if (src0->grad) {
const int n_past = ((int32_t *) tensor->op_params)[0];
src0->grad =
- ggml_add_impl(ctx, src0->grad,
+ ggml_add_or_set(ctx, src0->grad,
ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_DIAG_MASK_ZERO:
if (src0->grad) {
const int n_past = ((int32_t *) tensor->op_params)[0];
src0->grad =
- ggml_add_impl(ctx, src0->grad,
+ ggml_add_or_set(ctx, src0->grad,
ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_SOFT_MAX:
// necessary for llama
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx, src0->grad,
+ ggml_add_or_set(ctx, src0->grad,
ggml_soft_max_back(ctx, tensor->grad, tensor),
- inplace);
+ zero_table);
}
} break;
{
// necessary for llama
if (src0->grad) {
- const int n_past = ((int32_t *) tensor->op_params)[0];
+ //const int n_past = ((int32_t *) tensor->op_params)[0];
const int n_dims = ((int32_t *) tensor->op_params)[1];
const int mode = ((int32_t *) tensor->op_params)[2];
const int n_ctx = ((int32_t *) tensor->op_params)[3];
memcpy(&xpos_base, (int32_t *) tensor->op_params + 6, sizeof(float));
memcpy(&xpos_down, (int32_t *) tensor->op_params + 7, sizeof(bool));
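+                // src1 now holds the token positions used by rope (n_past is no longer used)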
- src0->grad = ggml_add_impl(ctx,
+ src0->grad = ggml_add_or_set(ctx,
src0->grad,
ggml_rope_back(ctx,
tensor->grad,
- n_past,
+ src1,
n_dims,
mode,
n_ctx,
freq_scale,
xpos_base,
xpos_down),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_ROPE_BACK:
{
if (src0->grad) {
- const int n_past = ((int32_t *) tensor->op_params)[0];
+ //const int n_past = ((int32_t *) tensor->op_params)[0];
const int n_dims = ((int32_t *) tensor->op_params)[1];
const int mode = ((int32_t *) tensor->op_params)[2];
const int n_ctx = ((int32_t *) tensor->op_params)[3];
memcpy(&xpos_base, (int32_t *) tensor->op_params + 6, sizeof(float));
memcpy(&xpos_down, (int32_t *) tensor->op_params + 7, sizeof(bool));
- src0->grad = ggml_add_impl(ctx,
+ src0->grad = ggml_add_or_set(ctx,
src0->grad,
ggml_rope_impl(ctx,
tensor->grad,
- n_past,
+ src1,
n_dims,
mode,
+ 0,
n_ctx,
freq_base,
freq_scale,
+ 0.0f,
+ 1.0f,
+ 0.0f,
+ 0.0f,
xpos_base,
xpos_down,
false),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_ALIBI:
{
GGML_ASSERT(false); // TODO: not implemented
} break;
+ case GGML_OP_CONV_1D_STAGE_0:
+ {
+ GGML_ASSERT(false); // TODO: not implemented
+ } break;
+ case GGML_OP_CONV_1D_STAGE_1:
+ {
+ GGML_ASSERT(false); // TODO: not implemented
+ } break;
+ case GGML_OP_CONV_TRANSPOSE_1D:
+ {
+ GGML_ASSERT(false); // TODO: not implemented
+ } break;
case GGML_OP_CONV_2D:
{
GGML_ASSERT(false); // TODO: not implemented
} break;
+ case GGML_OP_CONV_2D_STAGE_0:
+ {
+ GGML_ASSERT(false); // TODO: not implemented
+ } break;
+ case GGML_OP_CONV_2D_STAGE_1:
+ {
+ GGML_ASSERT(false); // TODO: not implemented
+ } break;
case GGML_OP_CONV_TRANSPOSE_2D:
{
GGML_ASSERT(false); // TODO: not implemented
masked);
}
- if (src0->grad) {
- struct ggml_tensor * grad_q = NULL;
- const size_t nb0 = flash_grad->nb[0];
- const size_t offset = 0;
- switch(src0->n_dims) {
- case 2:
- {
- grad_q = ggml_view_2d(ctx,
- flash_grad,
- src0->ne[0],
- src0->ne[1],
- nb0*src0->ne[0],
- offset);
- } break;
- case 3:
- {
- grad_q = ggml_view_3d(ctx,
- flash_grad,
- src0->ne[0],
- src0->ne[1],
- src0->ne[2],
- nb0*src0->ne[0],
- nb0*src0->ne[0]*src0->ne[1],
- offset);
- } break;
- case 4:
- {
- grad_q = ggml_view_4d(ctx,
- flash_grad,
- src0->ne[0],
- src0->ne[1],
- src0->ne[2],
- src0->ne[3],
- nb0*src0->ne[0],
- nb0*src0->ne[0]*src0->ne[1],
- nb0*src0->ne[0]*src0->ne[1]*src0->ne[2],
- offset);
- } break;
- }
+ struct ggml_tensor * src2 = tensor->src[2];
+ const int64_t elem_q = ggml_nelements(src0);
+ const int64_t elem_k = ggml_nelements(src1);
+ const int64_t elem_v = ggml_nelements(src2);
+
+ enum ggml_type result_type = flash_grad->type;
+ GGML_ASSERT(ggml_blck_size(result_type) == 1);
+ const size_t tsize = ggml_type_size(result_type);
- src0->grad = ggml_add_impl(ctx,
+ const size_t offs_q = 0;
+ const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN);
+ const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN);
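+            // flash_grad packs the gradients of q, k and v back-to-back in one tensor;
+            // each is recovered below as a 1d view at the padded offsets computed above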
+
+ if (src0->grad) {
+ struct ggml_tensor * view_q = ggml_view_1d(ctx, flash_grad, elem_q, offs_q);
+ struct ggml_tensor * grad_q = ggml_reshape(ctx, view_q, src0);
+ src0->grad = ggml_add_or_set(ctx,
src0->grad,
grad_q,
- inplace);
+ zero_table);
}
-
- if (src1->grad) {
- struct ggml_tensor * grad_k = NULL;
- const size_t nb0 = flash_grad->nb[0];
- const size_t offset = nb0*src0->ne[0]*src0->ne[1]*src0->ne[2]*src0->ne[3];
- switch(src1->n_dims) {
- case 2:
- {
- grad_k = ggml_view_2d(ctx,
- flash_grad,
- src1->ne[0],
- src1->ne[1],
- nb0*src1->ne[0],
- offset);
- } break;
- case 3:
- {
- grad_k = ggml_view_3d(ctx,
- flash_grad,
- src1->ne[0],
- src1->ne[1],
- src1->ne[2],
- nb0*src1->ne[0],
- nb0*src1->ne[0]*src1->ne[1],
- offset);
- } break;
- case 4:
- {
- grad_k = ggml_view_4d(ctx,
- flash_grad,
- src1->ne[0],
- src1->ne[1],
- src1->ne[2],
- src1->ne[3],
- nb0*src1->ne[0],
- nb0*src1->ne[0]*src1->ne[1],
- nb0*src1->ne[0]*src1->ne[1]*src1->ne[2],
- offset);
- } break;
- }
-
- src1->grad = ggml_add_impl(ctx,
+ if (src1->grad) {
+ struct ggml_tensor * view_k = ggml_view_1d(ctx, flash_grad, elem_k, offs_k);
+ struct ggml_tensor * grad_k = ggml_reshape(ctx, view_k, src1);
+ src1->grad = ggml_add_or_set(ctx,
src1->grad,
grad_k,
- inplace);
+ zero_table);
}
-
- struct ggml_tensor * opt0 = tensor->src[2];
-
- if (opt0->grad) {
- struct ggml_tensor * grad_v = NULL;
- const size_t nb0 = flash_grad->nb[0];
- const size_t offset = nb0*src0->ne[0]*src0->ne[1]*src0->ne[2]*src0->ne[3]
- + nb0*src1->ne[0]*src1->ne[1]*src1->ne[2]*src1->ne[3];
- switch(opt0->n_dims) {
- case 2:
- {
- grad_v = ggml_view_2d(ctx,
- flash_grad,
- opt0->ne[0],
- opt0->ne[1],
- nb0*opt0->ne[0],
- offset);
- } break;
- case 3:
- {
- grad_v = ggml_view_3d(ctx,
- flash_grad,
- opt0->ne[0],
- opt0->ne[1],
- opt0->ne[2],
- nb0*opt0->ne[0],
- nb0*opt0->ne[0]*opt0->ne[1],
- offset);
- } break;
- case 4:
- {
- grad_v = ggml_view_4d(ctx,
- flash_grad,
- opt0->ne[0],
- opt0->ne[1],
- opt0->ne[2],
- opt0->ne[3],
- nb0*opt0->ne[0],
- nb0*opt0->ne[0]*opt0->ne[1],
- nb0*opt0->ne[0]*opt0->ne[1]*opt0->ne[2],
- offset);
- } break;
- }
-
- opt0->grad = ggml_add_impl(ctx,
- opt0->grad,
+ if (src2->grad) {
+ struct ggml_tensor * view_v = ggml_view_1d(ctx, flash_grad, elem_v, offs_v);
+ struct ggml_tensor * grad_v = ggml_reshape(ctx, view_v, src2);
+ src2->grad = ggml_add_or_set(ctx,
+ src2->grad,
grad_v,
- inplace);
+ zero_table);
}
} break;
case GGML_OP_FLASH_FF:
{
if (src0->grad) {
src0->grad =
- ggml_add_impl(ctx,
+ ggml_add_or_set(ctx,
src0->grad,
ggml_mul(ctx,
ggml_sgn(ctx, src0),
tensor->grad),
- inplace);
+ zero_table);
}
} break;
case GGML_UNARY_OP_SGN:
case GGML_UNARY_OP_NEG:
{
if (src0->grad) {
- src0->grad = ggml_sub_impl(ctx, src0->grad, tensor->grad, inplace);
+ src0->grad = ggml_sub_or_set(ctx, src0->grad, tensor->grad, zero_table);
}
} break;
case GGML_UNARY_OP_STEP:
case GGML_UNARY_OP_RELU:
{
if (src0->grad) {
- src0->grad = ggml_add_impl(ctx,
+ src0->grad = ggml_add_or_set(ctx,
src0->grad,
ggml_mul(ctx,
ggml_step(ctx, src0),
tensor->grad),
- inplace);
+ zero_table);
}
} break;
case GGML_UNARY_OP_GELU:
{
// necessary for llama
if (src0->grad) {
- src0->grad = ggml_add_impl(ctx,
+ src0->grad = ggml_add_or_set(ctx,
src0->grad,
ggml_silu_back(ctx, src0, tensor->grad),
- inplace);
+ zero_table);
}
} break;
default:
case GGML_OP_CROSS_ENTROPY_LOSS:
{
if (src0->grad) {
- src0->grad = ggml_add_impl(ctx,
+ src0->grad = ggml_add_or_set(ctx,
src0->grad,
ggml_cross_entropy_loss_back(ctx,
src0,
src1,
tensor->grad),
- inplace);
+ zero_table);
}
} break;
case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
GGML_ASSERT(false);
} break;
}
-}
-
-static_assert(GGML_GRAPH_HASHTABLE_SIZE > GGML_MAX_NODES * 2, "GGML_GRAPH_HT_SIZE is too small");
-
-static size_t hash(void * p) {
- return (size_t)p % GGML_GRAPH_HASHTABLE_SIZE;
-}
-
-static bool hash_insert(void * hash_table[], void * p) {
- size_t h = hash(p);
- // linear probing
- size_t i = h;
- while (hash_table[i] != NULL && hash_table[i] != p) {
- i = (i + 1) % GGML_GRAPH_HASHTABLE_SIZE;
- if (i == h) {
- // hash table is full
- GGML_ASSERT(false);
+ for (int i = 0; i < GGML_MAX_SRC; ++i) {
+ if (tensor->src[i] && tensor->src[i]->grad) {
+ GGML_ASSERT(ggml_are_same_shape(tensor->src[i], tensor->src[i]->grad));
}
}
-
- if (hash_table[i] == p) {
- return true;
- }
-
- // insert
- hash_table[i] = p;
- return false;
}
static void ggml_visit_parents(struct ggml_cgraph * cgraph, struct ggml_tensor * node) {
}
// check if already visited
- if (hash_insert(cgraph->visited_hash_table, node)) {
+ if (ggml_hash_insert(cgraph->visited_hash_table, node) == GGML_HASHTABLE_ALREADY_EXISTS) {
return;
}
for (int i = 0; i < GGML_MAX_SRC; ++i) {
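+        // visit sources in the order given by cgraph->order so that node numbering
+        // (and thus evaluation order) can run left-to-right or right-to-left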
- if (node->src[i]) {
- ggml_visit_parents(cgraph, node->src[i]);
+ const int k =
+ (cgraph->order == GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT) ? i :
+ (cgraph->order == GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT) ? (GGML_MAX_SRC-1-i) :
+            /* unknown order, just fall back to using i */ i;
+ if (node->src[k]) {
+ ggml_visit_parents(cgraph, node->src[k]);
}
}
if (node->op == GGML_OP_NONE && node->grad == NULL) {
// reached a leaf node, not part of the gradient graph (e.g. a constant)
- GGML_ASSERT(cgraph->n_leafs < GGML_MAX_NODES);
+ GGML_ASSERT(cgraph->n_leafs < cgraph->size);
if (strlen(node->name) == 0) {
ggml_format_name(node, "leaf_%d", cgraph->n_leafs);
cgraph->leafs[cgraph->n_leafs] = node;
cgraph->n_leafs++;
} else {
- GGML_ASSERT(cgraph->n_nodes < GGML_MAX_NODES);
+ GGML_ASSERT(cgraph->n_nodes < cgraph->size);
if (strlen(node->name) == 0) {
ggml_format_name(node, "node_%d", cgraph->n_nodes);
}
cgraph->nodes[cgraph->n_nodes] = node;
- cgraph->grads[cgraph->n_nodes] = node->grad;
+ if (cgraph->grads) {
+ cgraph->grads[cgraph->n_nodes] = node->grad;
+ }
cgraph->n_nodes++;
}
}
static void ggml_build_forward_impl(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor, bool expand) {
if (!expand) {
- cgraph->n_nodes = 0;
- cgraph->n_leafs = 0;
+ // TODO: this branch isn't accessible anymore, maybe move this to ggml_build_forward_expand
+ ggml_graph_clear(cgraph);
}
const int n0 = cgraph->n_nodes;
ggml_build_forward_impl(cgraph, tensor, true);
}
-struct ggml_cgraph ggml_build_forward(struct ggml_tensor * tensor) {
- struct ggml_cgraph result = {
- /*.n_nodes =*/ 0,
- /*.n_leafs =*/ 0,
- /*.nodes =*/ { NULL },
- /*.grads =*/ { NULL },
- /*.leafs =*/ { NULL },
- /*.hash_table =*/ { NULL },
- /*.perf_runs =*/ 0,
- /*.perf_cycles =*/ 0,
- /*.perf_time_us =*/ 0,
- };
-
- ggml_build_forward_impl(&result, tensor, false);
-
- return result;
-}
-
void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep) {
GGML_ASSERT(gf->n_nodes > 0);
}
}
+ // remember original gradients which start with zero values
+ struct ggml_hash_set zero_table = ggml_hash_set_new(gf->size);
+ for (int i = 0; i < gf->n_nodes; i++) {
+ if (gf->grads[i]) {
+ ggml_hash_insert(zero_table, gf->grads[i]);
+ }
+ }
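+    // a gradient that is still in the zero_table holds only zeros, so
+    // ggml_add_or_set can replace it with the new value instead of emitting an add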
+
for (int i = gf->n_nodes - 1; i >= 0; i--) {
struct ggml_tensor * node = gf->nodes[i];
- // because we detached the grad nodes from the original graph, we can afford inplace operations
+        // ggml_compute_backward does not create inplace operations to accumulate the gradients;
+        // the graph allocator can later turn these adds into inplace operations automatically
if (node->grad) {
- ggml_compute_backward(ctx, node, keep);
+ ggml_compute_backward(ctx, node, zero_table);
}
}
ggml_build_forward_expand(gb, node->grad);
}
}
+
+ ggml_hash_set_free(zero_table);
}
-struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep) {
- struct ggml_cgraph result = *gf;
- ggml_build_backward_expand(ctx, gf, &result, keep);
- return result;
+static size_t ggml_graph_nbytes(size_t size, bool grads) {
+ size_t nbytes = sizeof(struct ggml_cgraph);
+ nbytes += size * sizeof(struct ggml_tensor *) * 2; // leafs + nodes
+ if (grads) {
+ nbytes += size * sizeof(struct ggml_tensor *); // grads
+ }
+ nbytes += ggml_hash_size(size * 2) * sizeof(struct ggml_tensor *); // hash set
+ return nbytes;
}
-struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) {
- struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, GGML_GRAPH_SIZE);
+size_t ggml_graph_overhead_custom(size_t size, bool grads) {
+ return GGML_OBJECT_SIZE + GGML_PAD(ggml_graph_nbytes(size, grads), GGML_MEM_ALIGN);
+}
+
+size_t ggml_graph_overhead(void) {
+ return ggml_graph_overhead_custom(GGML_DEFAULT_GRAPH_SIZE, false);
+}
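+
+// usage sketch (illustrative, not part of the API): reserving a context that is
+// just large enough for one custom-sized graph
+//
+//   struct ggml_init_params params = {
+//       /*.mem_size   =*/ ggml_graph_overhead_custom(size, grads),
+//       /*.mem_buffer =*/ NULL,
+//       /*.no_alloc   =*/ true,
+//   };
+//   struct ggml_context * ctx = ggml_init(params);
+//   struct ggml_cgraph  * gf  = ggml_new_graph_custom(ctx, size, grads);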
+
+struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t size, bool grads) {
+ const size_t obj_size = ggml_graph_nbytes(size, grads);
+ struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, obj_size);
struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);
+ struct ggml_tensor ** data_start = (struct ggml_tensor **) (cgraph + 1);
+
+ size_t hash_size = ggml_hash_size(size * 2);
+ struct ggml_tensor ** nodes_ptr = data_start;
+ struct ggml_tensor ** leafs_ptr = nodes_ptr + size;
+ struct ggml_tensor ** hash_keys_ptr = leafs_ptr + size;
+ struct ggml_tensor ** grads_ptr = grads ? hash_keys_ptr + hash_size : NULL;
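+    // memory layout: [struct ggml_cgraph][nodes][leafs][hash keys][grads (optional)];
+    // the hash set is sized at 2x the node capacity to keep the table sparse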
+
+ // check that we allocated the correct amount of memory
+ assert(obj_size == (size_t) (
+ (grads ? (char *)(grads_ptr + size) : (char *)(hash_keys_ptr + hash_size)) - (char *)cgraph));
+
+ memset(hash_keys_ptr, 0, hash_size * sizeof(struct ggml_tensor *));
+
*cgraph = (struct ggml_cgraph) {
+ /*.size =*/ size,
/*.n_nodes =*/ 0,
/*.n_leafs =*/ 0,
- /*.nodes =*/ { NULL },
- /*.grads =*/ { NULL },
- /*.leafs =*/ { NULL },
- /*.hash_table =*/ { NULL },
+ /*.nodes =*/ nodes_ptr,
+ /*.grads =*/ grads_ptr,
+ /*.leafs =*/ leafs_ptr,
+ /*.hash_table =*/ { hash_size, hash_keys_ptr },
+ /*.order =*/ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT,
/*.perf_runs =*/ 0,
/*.perf_cycles =*/ 0,
/*.perf_time_us =*/ 0,
return cgraph;
}
-struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor) {
- struct ggml_cgraph * cgraph = ggml_new_graph(ctx);
- ggml_build_forward_impl(cgraph, tensor, false);
+struct ggml_cgraph * ggml_new_graph(struct ggml_context * ctx) {
+ return ggml_new_graph_custom(ctx, GGML_DEFAULT_GRAPH_SIZE, false);
+}
+
+struct ggml_cgraph * ggml_graph_view(struct ggml_context * ctx, struct ggml_cgraph * cgraph0, int i0, int i1) {
+ const size_t obj_size = sizeof(struct ggml_cgraph);
+ struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, obj_size);
+ struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);
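+
+    // the view aliases the nodes (and grads) of cgraph0 in the range [i0, i1); it owns
+    // no storage of its own, so its size is 0 and it has no leafs or hash table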
+
+ *cgraph = (struct ggml_cgraph) {
+ /*.size =*/ 0,
+ /*.n_nodes =*/ i1 - i0,
+ /*.n_leafs =*/ 0,
+ /*.nodes =*/ cgraph0->nodes + i0,
+ /*.grads =*/ cgraph0->grads ? cgraph0->grads + i0 : NULL,
+ /*.leafs =*/ NULL,
+ /*.hash_table =*/ { 0, NULL },
+ /*.order =*/ cgraph0->order,
+ /*.perf_runs =*/ 0,
+ /*.perf_cycles =*/ 0,
+ /*.perf_time_us =*/ 0,
+ };
+
return cgraph;
}
-size_t ggml_graph_overhead(void) {
- return GGML_OBJECT_SIZE + GGML_PAD(GGML_GRAPH_SIZE, GGML_MEM_ALIGN);
+void ggml_graph_cpy(struct ggml_cgraph * src, struct ggml_cgraph * dst) {
+ GGML_ASSERT(dst->size >= src->n_leafs);
+ GGML_ASSERT(dst->size >= src->n_nodes);
+ GGML_ASSERT(dst->visited_hash_table.size >= src->visited_hash_table.size);
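+    // shallow copy: only the tensor pointers are copied; dst must already provide
+    // enough capacity (asserted above)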
+
+ dst->n_leafs = src->n_leafs;
+ dst->n_nodes = src->n_nodes;
+ dst->order = src->order;
+
+ for (int i = 0; i < src->n_leafs; ++i) {
+ dst->leafs[i] = src->leafs[i];
+ }
+
+ for (int i = 0; i < src->n_nodes; ++i) {
+ dst->nodes[i] = src->nodes[i];
+ }
+
+ if (src->grads) {
+ GGML_ASSERT(dst->grads != NULL);
+ for (int i = 0; i < src->n_nodes; ++i) {
+ dst->grads[i] = src->grads[i];
+ }
+ }
+
+ for (size_t i = 0; i < src->visited_hash_table.size; ++i) {
+ if (src->visited_hash_table.keys[i]) {
+ ggml_hash_insert(dst->visited_hash_table, src->visited_hash_table.keys[i]);
+ }
+ }
+}
+
+struct ggml_cgraph * ggml_graph_dup(struct ggml_context * ctx, struct ggml_cgraph * cgraph) {
+ struct ggml_cgraph * result = ggml_new_graph_custom(ctx, cgraph->size, cgraph->grads != NULL);
+ ggml_graph_cpy(cgraph, result);
+ return result;
+}
+
+void ggml_graph_reset(struct ggml_cgraph * cgraph) {
+ GGML_ASSERT(cgraph->grads != NULL);
+
+ for (int i = 0; i < cgraph->n_nodes; i++) {
+ struct ggml_tensor * grad = cgraph->grads[i];
+
+ if (grad) {
+ ggml_set_zero(grad);
+ }
+ }
+}
+
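+// unlike ggml_graph_reset, which zeroes the gradient values, ggml_graph_clear
+// empties the graph itself: the node and leaf counts and the visited hash table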
+void ggml_graph_clear(struct ggml_cgraph * cgraph) {
+ cgraph->n_leafs = 0;
+ cgraph->n_nodes = 0;
+ memset(cgraph->visited_hash_table.keys, 0, cgraph->visited_hash_table.size * sizeof(struct ggml_tensor *));
}
//
struct ggml_compute_state_shared * shared;
};
-static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
- int64_t cycles_cur = ggml_perf_cycles() - st->perf_node_start_cycles;
- int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;
+static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) {
+ int64_t cycles_cur = ggml_perf_cycles() - st->perf_node_start_cycles;
+ int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us;
+
+ node->perf_runs++;
+ node->perf_cycles += cycles_cur;
+ node->perf_time_us += time_us_cur;
+}
+
+static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
+ int n_tasks = 0;
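+    // number of threads that will cooperate on this op; ops that are not
+    // parallelized run as a single task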
+
+ switch (node->op) {
+ case GGML_OP_CPY:
+ case GGML_OP_DUP:
+ case GGML_OP_ADD:
+ case GGML_OP_ADD1:
+ case GGML_OP_ACC:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_SUB:
+ case GGML_OP_DIV:
+ case GGML_OP_SQR:
+ case GGML_OP_SQRT:
+ case GGML_OP_LOG:
+ case GGML_OP_SUM:
+ case GGML_OP_SUM_ROWS:
+ case GGML_OP_MEAN:
+ case GGML_OP_ARGMAX:
+ case GGML_OP_REPEAT:
+ case GGML_OP_REPEAT_BACK:
+ {
+ n_tasks = 1;
+ } break;
+ case GGML_OP_UNARY:
+ switch (ggml_get_unary_op(node)) {
+ case GGML_UNARY_OP_ABS:
+ case GGML_UNARY_OP_SGN:
+ case GGML_UNARY_OP_NEG:
+ case GGML_UNARY_OP_STEP:
+ case GGML_UNARY_OP_TANH:
+ case GGML_UNARY_OP_ELU:
+ case GGML_UNARY_OP_RELU:
+ case GGML_UNARY_OP_LEAKY:
+ {
+ n_tasks = 1;
+ } break;
+
+ case GGML_UNARY_OP_GELU:
+ case GGML_UNARY_OP_GELU_QUICK:
+ case GGML_UNARY_OP_SILU:
+ {
+ n_tasks = n_threads;
+ } break;
+ }
+ break;
+ case GGML_OP_SILU_BACK:
+ case GGML_OP_MUL:
+ case GGML_OP_NORM:
+ case GGML_OP_RMS_NORM:
+ case GGML_OP_RMS_NORM_BACK:
+ case GGML_OP_GROUP_NORM:
+ case GGML_OP_CONCAT:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_MUL_MAT:
+ {
+ n_tasks = n_threads;
+
+ // TODO: use different scheduling for different matrix sizes
+ //const int nr0 = ggml_nrows(node->src[0]);
+ //const int nr1 = ggml_nrows(node->src[1]);
+
+ //n_tasks = MIN(n_threads, MAX(1, nr0/128));
+ //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);
+
+#if defined(GGML_USE_CUBLAS)
+ if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) {
+                    n_tasks = 1; // TODO: this actually does nothing
+ // the threads are still spinning
+ }
+#elif defined(GGML_USE_CLBLAST)
+ if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
+                    n_tasks = 1; // TODO: this actually does nothing
+ // the threads are still spinning
+ }
+#endif
+#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
+ if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
+                    n_tasks = 1; // TODO: this actually does nothing
+ // the threads are still spinning
+ }
+#endif
+ } break;
+ case GGML_OP_OUT_PROD:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_SCALE:
+ case GGML_OP_SET:
+ case GGML_OP_CONT:
+ case GGML_OP_RESHAPE:
+ case GGML_OP_VIEW:
+ case GGML_OP_PERMUTE:
+ case GGML_OP_TRANSPOSE:
+ case GGML_OP_GET_ROWS:
+ case GGML_OP_GET_ROWS_BACK:
+ case GGML_OP_DIAG:
+ {
+ n_tasks = 1;
+ } break;
+ case GGML_OP_DIAG_MASK_ZERO:
+ case GGML_OP_DIAG_MASK_INF:
+ case GGML_OP_SOFT_MAX:
+ case GGML_OP_SOFT_MAX_BACK:
+ case GGML_OP_ROPE:
+ case GGML_OP_ROPE_BACK:
+ case GGML_OP_ADD_REL_POS:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_ALIBI:
+ {
+ n_tasks = 1; //TODO
+ } break;
+ case GGML_OP_CLAMP:
+ {
+ n_tasks = 1; //TODO
+ } break;
+ case GGML_OP_CONV_1D:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_CONV_1D_STAGE_0:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_CONV_1D_STAGE_1:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_CONV_TRANSPOSE_1D:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_CONV_2D:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_CONV_2D_STAGE_0:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_CONV_2D_STAGE_1:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_CONV_TRANSPOSE_2D:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_POOL_1D:
+ case GGML_OP_POOL_2D:
+ {
+ n_tasks = 1;
+ } break;
+ case GGML_OP_UPSCALE:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_FLASH_ATTN:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_FLASH_FF:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_FLASH_ATTN_BACK:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_WIN_PART:
+ case GGML_OP_WIN_UNPART:
+ case GGML_OP_GET_REL_POS:
+ case GGML_OP_MAP_UNARY:
+ case GGML_OP_MAP_BINARY:
+ case GGML_OP_MAP_CUSTOM1_F32:
+ case GGML_OP_MAP_CUSTOM2_F32:
+ case GGML_OP_MAP_CUSTOM3_F32:
+ {
+ n_tasks = 1;
+ } break;
+ case GGML_OP_MAP_CUSTOM1:
+ {
+ struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params;
+ if (p->n_tasks == GGML_N_TASKS_MAX) {
+ n_tasks = n_threads;
+ } else {
+ n_tasks = MIN(p->n_tasks, n_threads);
+ }
+ } break;
+ case GGML_OP_MAP_CUSTOM2:
+ {
+ struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params;
+ if (p->n_tasks == GGML_N_TASKS_MAX) {
+ n_tasks = n_threads;
+ } else {
+ n_tasks = MIN(p->n_tasks, n_threads);
+ }
+ } break;
+ case GGML_OP_MAP_CUSTOM3:
+ {
+ struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params;
+ if (p->n_tasks == GGML_N_TASKS_MAX) {
+ n_tasks = n_threads;
+ } else {
+ n_tasks = MIN(p->n_tasks, n_threads);
+ }
+ } break;
+ case GGML_OP_CROSS_ENTROPY_LOSS:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_NONE:
+ {
+ n_tasks = 1;
+ } break;
+ case GGML_OP_COUNT:
+ {
+ GGML_ASSERT(false);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+
+ assert(n_tasks > 0);
- node->perf_runs++;
- node->perf_cycles += cycles_cur;
- node->perf_time_us += time_us_cur;
+ return n_tasks;
}
static thread_ret_t ggml_graph_compute_thread(void * data) {
const struct ggml_cgraph * cgraph = state->shared->cgraph;
const struct ggml_cplan * cplan = state->shared->cplan;
- const int * n_tasks_arr = cplan->n_tasks;
const int n_threads = state->shared->n_threads;
set_numa_thread_affinity(state->ith, n_threads);
if (node_n != -1) {
/* FINALIZE */
- struct ggml_tensor * node = state->shared->cgraph->nodes[node_n];
+ struct ggml_tensor * node = cgraph->nodes[node_n];
if (GGML_OP_HAS_FINALIZE[node->op]) {
- params.nth = n_tasks_arr[node_n];
+ params.nth = ggml_get_n_tasks(node, n_threads);
ggml_compute_forward(¶ms, node);
}
ggml_graph_compute_perf_stats_node(node, state->shared);
GGML_PRINT_DEBUG_5("%s: %d/%d\n", __func__, node_n, cgraph->n_nodes);
struct ggml_tensor * node = cgraph->nodes[node_n];
- const int n_tasks = n_tasks_arr[node_n];
+ const int n_tasks = ggml_get_n_tasks(node, n_threads);
state->shared->perf_node_start_cycles = ggml_perf_cycles();
state->shared->perf_node_start_time_us = ggml_perf_time_us();
/* COMPUTE */
struct ggml_tensor * node = cgraph->nodes[node_n];
- const int n_tasks = n_tasks_arr[node_n];
+ const int n_tasks = ggml_get_n_tasks(node, n_threads);
struct ggml_compute_params params = {
/*.type =*/ GGML_TASK_COMPUTE,
struct ggml_tensor * node = cgraph->nodes[i];
+ size_t cur = 0;
+
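+        // cur is the work buffer size this node needs; the plan keeps the maximum
+        // over all nodes (see work_size below)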
switch (node->op) {
case GGML_OP_CPY:
case GGML_OP_DUP:
{
n_tasks = n_threads;
- size_t cur = 0;
if (ggml_is_quantized(node->type)) {
cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
}
-
- work_size = MAX(work_size, cur);
} break;
case GGML_OP_ADD:
case GGML_OP_ADD1:
{
n_tasks = n_threads;
- size_t cur = 0;
-
if (ggml_is_quantized(node->src[0]->type)) {
cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
}
-
- work_size = MAX(work_size, cur);
} break;
case GGML_OP_ACC:
{
n_tasks = n_threads;
- size_t cur = 0;
-
if (ggml_is_quantized(node->src[0]->type)) {
cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
}
-
- work_size = MAX(work_size, cur);
- } break;
- case GGML_OP_SUB:
- case GGML_OP_DIV:
- case GGML_OP_SQR:
- case GGML_OP_SQRT:
- case GGML_OP_LOG:
- case GGML_OP_SUM:
- case GGML_OP_SUM_ROWS:
- case GGML_OP_MEAN:
- case GGML_OP_ARGMAX:
- case GGML_OP_REPEAT:
- case GGML_OP_REPEAT_BACK:
- {
- n_tasks = 1;
- } break;
-
- case GGML_OP_UNARY:
- {
- switch (ggml_get_unary_op(node)) {
- case GGML_UNARY_OP_ABS:
- case GGML_UNARY_OP_SGN:
- case GGML_UNARY_OP_NEG:
- case GGML_UNARY_OP_STEP:
- case GGML_UNARY_OP_TANH:
- case GGML_UNARY_OP_ELU:
- case GGML_UNARY_OP_RELU:
- {
- n_tasks = 1;
- } break;
-
- case GGML_UNARY_OP_GELU:
- case GGML_UNARY_OP_GELU_QUICK:
- case GGML_UNARY_OP_SILU:
- {
- n_tasks = n_threads;
- } break;
- }
- } break;
- case GGML_OP_SILU_BACK:
- case GGML_OP_MUL:
- case GGML_OP_NORM:
- case GGML_OP_RMS_NORM:
- case GGML_OP_RMS_NORM_BACK:
- case GGML_OP_GROUP_NORM:
- {
- n_tasks = n_threads;
} break;
- case GGML_OP_CONCAT:
case GGML_OP_MUL_MAT:
- case GGML_OP_OUT_PROD:
{
- n_tasks = n_threads;
-
- // TODO: use different scheduling for different matrix sizes
- //const int nr0 = ggml_nrows(node->src[0]);
- //const int nr1 = ggml_nrows(node->src[1]);
-
- //n_tasks = MIN(n_threads, MAX(1, nr0/128));
- //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks%d\n", nr0, nr1, nr0*nr1, n_tasks);
-
- size_t cur = 0;
const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;
-#if defined(GGML_USE_CUBLAS)
- if (ggml_cuda_can_mul_mat(node->src[0], node->src[1], node)) {
- n_tasks = 1; // TODO: this actually is doing nothing
- // the threads are still spinning
- } else
-#elif defined(GGML_USE_CLBLAST)
+#if defined(GGML_USE_CLBLAST)
if (ggml_cl_can_mul_mat(node->src[0], node->src[1], node)) {
- n_tasks = 1; // TODO: this actually is doing nothing
- // the threads are still spinning
cur = ggml_cl_mul_mat_get_wsize(node->src[0], node->src[1], node);
} else
#endif
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
if (ggml_compute_forward_mul_mat_use_blas(node->src[0], node->src[1], node)) {
- n_tasks = 1; // TODO: this actually is doing nothing
- // the threads are still spinning
if (node->src[0]->type != GGML_TYPE_F32) {
// here we need memory just for single 2D matrix from src0
cur = ggml_type_size(GGML_TYPE_F32)*(node->src[0]->ne[0]*node->src[0]->ne[1]);
#endif
if (node->src[1]->type != vec_dot_type) {
cur = ggml_type_size(vec_dot_type)*ggml_nelements(node->src[1])/ggml_blck_size(vec_dot_type);
- } else {
- cur = 0;
}
-
- work_size = MAX(work_size, cur);
- } break;
- case GGML_OP_SCALE:
- {
- n_tasks = 1;
- } break;
- case GGML_OP_SET:
- case GGML_OP_CONT:
- case GGML_OP_RESHAPE:
- case GGML_OP_VIEW:
- case GGML_OP_PERMUTE:
- case GGML_OP_TRANSPOSE:
- case GGML_OP_GET_ROWS:
- case GGML_OP_GET_ROWS_BACK:
- case GGML_OP_DIAG:
- {
- n_tasks = 1;
} break;
- case GGML_OP_DIAG_MASK_ZERO:
- case GGML_OP_DIAG_MASK_INF:
- case GGML_OP_SOFT_MAX:
- case GGML_OP_SOFT_MAX_BACK:
- case GGML_OP_ROPE:
- case GGML_OP_ROPE_BACK:
- case GGML_OP_ADD_REL_POS:
+ case GGML_OP_OUT_PROD:
{
n_tasks = n_threads;
- } break;
- case GGML_OP_ALIBI:
- {
- n_tasks = 1; //TODO
- } break;
- case GGML_OP_CLAMP:
- {
- n_tasks = 1; //TODO
+
+ if (ggml_is_quantized(node->src[0]->type)) {
+ cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
+ }
} break;
case GGML_OP_CONV_1D:
{
- n_tasks = n_threads;
-
GGML_ASSERT(node->src[0]->ne[3] == 1);
GGML_ASSERT(node->src[1]->ne[2] == 1);
GGML_ASSERT(node->src[1]->ne[3] == 1);
- size_t cur = 0;
- const int nk = node->src[0]->ne[0];
+ const int64_t ne00 = node->src[0]->ne[0];
+ const int64_t ne01 = node->src[0]->ne[1];
+ const int64_t ne02 = node->src[0]->ne[2];
+
+ const int64_t ne10 = node->src[1]->ne[0];
+ const int64_t ne11 = node->src[1]->ne[1];
+
+ const int64_t ne0 = node->ne[0];
+ const int64_t ne1 = node->ne[1];
+ const int64_t nk = ne00;
+ const int64_t ew0 = nk * ne01;
+
+ UNUSED(ne02);
+ UNUSED(ne10);
+ UNUSED(ne11);
if (node->src[0]->type == GGML_TYPE_F16 &&
- node->src[1]->type == GGML_TYPE_F32) {
- cur = sizeof(ggml_fp16_t)*(
- nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
- ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
- );
+ node->src[1]->type == GGML_TYPE_F32) {
+ cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0);
} else if (node->src[0]->type == GGML_TYPE_F32 &&
- node->src[1]->type == GGML_TYPE_F32) {
- cur = sizeof(float)*(
- nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
- ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
- );
+ node->src[1]->type == GGML_TYPE_F32) {
+ cur = sizeof(float)*(ne0*ne1*ew0);
} else {
GGML_ASSERT(false);
}
+ } break;
+ case GGML_OP_CONV_TRANSPOSE_1D:
+ {
+ GGML_ASSERT(node->src[0]->ne[3] == 1);
+ GGML_ASSERT(node->src[1]->ne[2] == 1);
+ GGML_ASSERT(node->src[1]->ne[3] == 1);
- work_size = MAX(work_size, cur);
+ const int64_t ne00 = node->src[0]->ne[0]; // K
+ const int64_t ne01 = node->src[0]->ne[1]; // Cout
+ const int64_t ne02 = node->src[0]->ne[2]; // Cin
+
+ const int64_t ne10 = node->src[1]->ne[0]; // L
+ const int64_t ne11 = node->src[1]->ne[1]; // Cin
+
+ if (node->src[0]->type == GGML_TYPE_F16 &&
+ node->src[1]->type == GGML_TYPE_F32) {
+ cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
+ cur += sizeof(ggml_fp16_t)*ne10*ne11;
+ } else if (node->src[0]->type == GGML_TYPE_F32 &&
+ node->src[1]->type == GGML_TYPE_F32) {
+ cur += sizeof(float)*ne00*ne01*ne02;
+ cur += sizeof(float)*ne10*ne11;
+ } else {
+ GGML_ASSERT(false);
+ }
} break;
case GGML_OP_CONV_2D:
{
- n_tasks = n_threads;
-
const int64_t ne00 = node->src[0]->ne[0]; // W
const int64_t ne01 = node->src[0]->ne[1]; // H
const int64_t ne02 = node->src[0]->ne[2]; // C
const int64_t ne0 = node->ne[0];
const int64_t ne1 = node->ne[1];
const int64_t ne2 = node->ne[2];
+ const int64_t ne3 = node->ne[3];
const int64_t nk = ne00*ne01;
const int64_t ew0 = nk * ne02;
UNUSED(ne03);
UNUSED(ne2);
- size_t cur = 0;
-
if (node->src[0]->type == GGML_TYPE_F16 &&
node->src[1]->type == GGML_TYPE_F32) {
- cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0);
+ // im2col: [N*OH*OW, IC*KH*KW]
+ cur = sizeof(ggml_fp16_t)*(ne3*ne0*ne1*ew0);
} else if (node->src[0]->type == GGML_TYPE_F32 &&
node->src[1]->type == GGML_TYPE_F32) {
cur = sizeof(float)* (ne10*ne11*ne12);
} else {
GGML_ASSERT(false);
}
-
- work_size = MAX(work_size, cur);
} break;
case GGML_OP_CONV_TRANSPOSE_2D:
{
- n_tasks = n_threads;
-
const int64_t ne00 = node->src[0]->ne[0]; // W
const int64_t ne01 = node->src[0]->ne[1]; // H
const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
const int64_t ne11 = node->src[1]->ne[1]; // H
const int64_t ne12 = node->src[1]->ne[2]; // Channels In
- size_t cur = 0;
cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
-
- work_size = MAX(work_size, cur);
- } break;
- case GGML_OP_POOL_1D:
- case GGML_OP_POOL_2D:
- {
- n_tasks = 1;
- } break;
- case GGML_OP_UPSCALE:
- {
- n_tasks = n_threads;
} break;
case GGML_OP_FLASH_ATTN:
{
n_tasks = n_threads;
- size_t cur = 0;
-
const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
if (node->src[1]->type == GGML_TYPE_F32) {
cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
- }
-
- if (node->src[1]->type == GGML_TYPE_F16) {
+ } else if (node->src[1]->type == GGML_TYPE_F16) {
cur = sizeof(float)*ne11*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*ne11*n_tasks; // this is overestimated by x2
}
-
- work_size = MAX(work_size, cur);
} break;
case GGML_OP_FLASH_FF:
{
n_tasks = n_threads;
- size_t cur = 0;
-
if (node->src[1]->type == GGML_TYPE_F32) {
cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
- }
-
- if (node->src[1]->type == GGML_TYPE_F16) {
+ } else if (node->src[1]->type == GGML_TYPE_F16) {
cur = sizeof(float)*node->src[1]->ne[1]*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*node->src[1]->ne[1]*n_tasks; // this is overestimated by x2
}
-
- work_size = MAX(work_size, cur);
} break;
case GGML_OP_FLASH_ATTN_BACK:
{
n_tasks = n_threads;
- size_t cur = 0;
-
const int64_t D = node->src[0]->ne[0];
const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
if (node->src[1]->type == GGML_TYPE_F32) {
cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
- }
-
- if (node->src[1]->type == GGML_TYPE_F16) {
+ } else if (node->src[1]->type == GGML_TYPE_F16) {
cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
}
-
- work_size = MAX(work_size, cur);
- } break;
- case GGML_OP_WIN_PART:
- case GGML_OP_WIN_UNPART:
- case GGML_OP_GET_REL_POS:
- case GGML_OP_MAP_UNARY:
- case GGML_OP_MAP_BINARY:
- case GGML_OP_MAP_CUSTOM1_F32:
- case GGML_OP_MAP_CUSTOM2_F32:
- case GGML_OP_MAP_CUSTOM3_F32:
- {
- n_tasks = 1;
- } break;
- case GGML_OP_MAP_CUSTOM1:
- {
- struct ggml_map_custom1_op_params * p = (struct ggml_map_custom1_op_params *) node->op_params;
- if (p->n_tasks == GGML_N_TASKS_MAX) {
- n_tasks = n_threads;
- } else {
- n_tasks = MIN(p->n_tasks, n_threads);
- }
- } break;
- case GGML_OP_MAP_CUSTOM2:
- {
- struct ggml_map_custom2_op_params * p = (struct ggml_map_custom2_op_params *) node->op_params;
- if (p->n_tasks == GGML_N_TASKS_MAX) {
- n_tasks = n_threads;
- } else {
- n_tasks = MIN(p->n_tasks, n_threads);
- }
- } break;
- case GGML_OP_MAP_CUSTOM3:
- {
- struct ggml_map_custom3_op_params * p = (struct ggml_map_custom3_op_params *) node->op_params;
- if (p->n_tasks == GGML_N_TASKS_MAX) {
- n_tasks = n_threads;
- } else {
- n_tasks = MIN(p->n_tasks, n_threads);
- }
} break;
+
case GGML_OP_CROSS_ENTROPY_LOSS:
{
n_tasks = n_threads;
- size_t cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
-
- work_size = MAX(work_size, cur);
- } break;
- case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
- {
- n_tasks = n_threads;
- } break;
- case GGML_OP_NONE:
- {
- n_tasks = 1;
+ cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
} break;
case GGML_OP_COUNT:
{
GGML_ASSERT(false);
} break;
+ default:
+ break;
}
- cplan.n_tasks[i] = n_tasks;
+ work_size = MAX(work_size, cur);
}
if (work_size > 0) {
if (cplan->work_size > 0) {
GGML_ASSERT(cplan->work_data);
}
-
- for (int i = 0; i < cgraph->n_nodes; ++i) {
- if (cgraph->nodes[i]->op != GGML_OP_NONE) {
- GGML_ASSERT(cplan->n_tasks[i] > 0);
- }
- }
}
const int n_threads = cplan->n_threads;
return compute_status;
}
-void ggml_graph_reset(struct ggml_cgraph * cgraph) {
- for (int i = 0; i < cgraph->n_nodes; i++) {
- struct ggml_tensor * grad = cgraph->grads[i];
-
- if (grad) {
- ggml_set_zero(grad);
- }
- }
-}
-
void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);
const uint32_t magic = GGML_FILE_MAGIC;
const uint32_t version = GGML_FILE_VERSION;
const uint32_t n_leafs = cgraph->n_leafs;
- const uint32_t nodes = cgraph->n_nodes;
+ const uint32_t n_nodes = cgraph->n_nodes;
fwrite(&magic, sizeof(uint32_t), 1, fout);
fwrite(&version, sizeof(uint32_t), 1, fout);
fwrite(&n_leafs, sizeof(uint32_t), 1, fout);
- fwrite(&nodes, sizeof(uint32_t), 1, fout);
+ fwrite(&n_nodes, sizeof(uint32_t), 1, fout);
fwrite(&size_eval, sizeof(uint64_t), 1, fout);
}
if (idx == -1) {
for (int k = 0; k < cgraph->n_nodes; ++k) {
if (args[j] == cgraph->nodes[k]) {
- idx = GGML_MAX_NODES + k;
+ idx = cgraph->n_leafs + k;
break;
}
}
if (idx == -1) {
fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
+ fclose(fout);
return;
}
}
}
-struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
+struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval) {
assert(*ctx_data == NULL);
assert(*ctx_eval == NULL);
- struct ggml_cgraph result = { 0 };
+ struct ggml_cgraph * result = NULL;
struct ggml_tensor * data = NULL;
const uint32_t n_leafs = *(const uint32_t *) ptr; ptr += sizeof(n_leafs);
const uint32_t n_nodes = *(const uint32_t *) ptr; ptr += sizeof(n_nodes);
const uint64_t size_eval = *(const uint64_t *) ptr; ptr += sizeof(size_eval);
-
- result.n_leafs = n_leafs;
- result.n_nodes = n_nodes;
+ const int graph_size = MAX(n_leafs, n_nodes);
// create the data context
{
- const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead();
+ const size_t overhead = (n_leafs + n_nodes)*ggml_tensor_overhead() + ggml_graph_overhead_custom(graph_size, false);
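+        // also reserve room for the graph object itself, which is allocated in the
+        // same context as the tensors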
struct ggml_init_params params = {
.mem_size = size_eval + overhead,
}
}
+ result = ggml_new_graph_custom(*ctx_eval, graph_size, false);
+
+ result->n_leafs = n_leafs;
+ result->n_nodes = n_nodes;
+
// leafs
{
uint32_t type;
tensor->nb[j] = nb[j];
}
- result.leafs[i] = tensor;
+ result->leafs[i] = tensor;
ptr += ggml_nbytes(tensor);
continue;
}
- if (arg_idx < GGML_MAX_NODES) {
- args[j] = result.leafs[arg_idx];
+ if (arg_idx < result->n_leafs) {
+ args[j] = result->leafs[arg_idx];
} else {
- args[j] = result.nodes[arg_idx - GGML_MAX_NODES];
+ args[j] = result->nodes[arg_idx - result->n_leafs];
}
}
tensor->src[j] = args[j];
}
- result.nodes[i] = tensor;
+ result->nodes[i] = tensor;
fprintf(stderr, "%s: loaded node %d: '%16s', %3d dims, %9zu bytes\n", __func__, i, tensor->name, n_dims, ggml_nbytes(tensor));
}
}
static void ggml_opt_get_grad(int np, struct ggml_tensor * const ps[], float * g) {
- int i = 0;
+ int64_t i = 0;
for (int p = 0; p < np; ++p) {
const int64_t ne = ggml_nelements(ps[p]) ;
// TODO: add function to get all elements at once
}
}
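+
+// accumulate the (flattened) parameter gradients into g, scaled by scale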
+static void ggml_opt_acc_grad(int np, struct ggml_tensor * const ps[], float * g, float scale) {
+ int64_t i = 0;
+ for (int p = 0; p < np; ++p) {
+        const int64_t ne = ggml_nelements(ps[p]);
+ // TODO: add function to get all elements at once
+ for (int64_t j = 0; j < ne; ++j) {
+ g[i++] += ggml_get_f32_1d(ps[p]->grad, j) * scale;
+ }
+ }
+}
+
//
// ADAM
//
const float eps = params.adam.eps;
const float gclip = params.adam.gclip;
const int decay_min_ndim = params.adam.decay_min_ndim;
+ const int n_accum = MAX(1, params.n_gradient_accumulation);
+ const float accum_norm = 1.0f / (float) n_accum;
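+    // gradients are summed over n_accum evaluations and scaled by accum_norm,
+    // i.e. averaged over the accumulation steps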
+ float * g = opt->adam.g->data; // gradients
float * m = opt->adam.m->data; // first moment
float * v = opt->adam.v->data; // second moment
float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values
- if (callback) {
- callback(callback_data, &sched);
- }
-
- // compute the function value
- ggml_graph_reset (gf);
- ggml_set_f32 (f->grad, 1.0f);
-
struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
- ggml_graph_compute(gb, &cplan);
- opt->adam.fx_prev = ggml_get_f32_1d(f, 0);
+ bool cancel = false;
+
+ // compute the function value
+ float fx = 0;
+ ggml_set_zero(opt->adam.g);
+ for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
+ if (callback) {
+ callback(callback_data, accum_step, &sched, &cancel);
+ if (cancel) {
+ return GGML_OPT_CANCEL;
+ }
+ }
+ // ggml_graph_reset (gf);
+ ggml_set_f32 (f->grad, 1.0f);
+ ggml_graph_compute(gb, &cplan);
+ ggml_opt_acc_grad(np, ps, g, accum_norm);
+ fx += ggml_get_f32_1d(f, 0);
+ }
+ fx *= accum_norm;
+
+ opt->adam.fx_prev = fx;
opt->adam.fx_best = opt->adam.fx_prev;
if (pf) {
pf[opt->iter % params.past] = opt->adam.fx_prev;
if (gclip > 0.0f) {
// gradient clipping
ggml_float sum = 0.0;
- for (int p = 0; p < np; ++p) {
- const int64_t ne = ggml_nelements(ps[p]);
- for (int64_t j = 0; j < ne; ++j) {
- float g = ggml_get_f32_1d(ps[p]->grad, j);
- sum += (ggml_float)(g*g);
- }
+ for (int64_t i = 0; i < nx; ++i) {
+ sum += (ggml_float)(g[i]*g[i]);
}
ggml_float norm = sqrt(sum);
if (norm > (ggml_float) gclip) {
const int64_t ne = ggml_nelements(ps[p]);
const float p_decay = ((ps[p]->n_dims >= decay_min_ndim) ? decay : 0.0f) * sched;
for (int64_t j = 0; j < ne; ++j) {
- float x = ggml_get_f32_1d(ps[p], j);
- float g = ggml_get_f32_1d(ps[p]->grad, j)*gnorm;
- m[i] = m[i]*beta1 + g*(1.0f - beta1);
- v[i] = v[i]*beta2 + g*g*(1.0f - beta2);
+ float x = ggml_get_f32_1d(ps[p], j);
+ float g_ = g[i]*gnorm;
+ m[i] = m[i]*beta1 + g_*(1.0f - beta1);
+ v[i] = v[i]*beta2 + g_*g_*(1.0f - beta2);
float mh = m[i]*beta1h;
float vh = v[i]*beta2h;
vh = sqrtf(vh) + eps;
}
}
- if (callback) {
- callback(callback_data, &sched);
+ fx = 0;
+ ggml_set_zero(opt->adam.g);
+ for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
+ if (callback) {
+ callback(callback_data, accum_step, &sched, &cancel);
+ if (cancel) {
+                return GGML_OPT_CANCEL;
+ }
+ }
+ // ggml_graph_reset (gf);
+ ggml_set_f32 (f->grad, 1.0f);
+ ggml_graph_compute(gb, &cplan);
+ ggml_opt_acc_grad(np, ps, g, accum_norm);
+ fx += ggml_get_f32_1d(f, 0);
}
+ fx *= accum_norm;
- ggml_graph_reset (gf);
- ggml_set_f32 (f->grad, 1.0f);
-
- ggml_graph_compute(gb, &cplan);
-
- const float fx = ggml_get_f32_1d(f, 0);
opt->loss_after = fx;
-
// check convergence
if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
GGML_PRINT_DEBUG("converged\n");
float * step,
const float * xp,
struct ggml_tensor * f,
- struct ggml_cgraph * gf,
struct ggml_cgraph * gb,
struct ggml_cplan * cplan,
const int np,
struct ggml_tensor * ps[],
+ bool * cancel,
ggml_opt_callback callback,
void * callback_data) {
int count = 0;
const float dec = 0.5f;
const float inc = 2.1f;
+ const int n_accum = MAX(1, params->n_gradient_accumulation);
+ const float accum_norm = 1.0f / (float) n_accum;
+
if (*step <= 0.f) {
return GGML_LINESEARCH_INVALID_PARAMETERS;
}
dgtest = params->lbfgs.ftol*dginit;
while (true) {
- if (callback) {
- // LBFG-S does not support learning rate -> ignore learning schedule
- float sched = 0;
- callback(callback_data, &sched);
- }
-
ggml_vec_cpy_f32(nx, x, xp);
ggml_vec_mad_f32(nx, x, d, *step);
{
ggml_opt_set_params(np, ps, x);
- ggml_graph_reset (gf);
- ggml_set_f32 (f->grad, 1.0f);
-
- ggml_graph_compute(gb, cplan);
-
- ggml_opt_get_grad(np, ps, g);
+ *fx = 0;
+ memset(g, 0, sizeof(float)*nx);
+ for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
+ if (callback) {
+                // L-BFGS does not support a learning rate -> ignore the learning schedule
+ float sched = 0;
+ callback(callback_data, accum_step, &sched, cancel);
+ if (*cancel) {
+ return GGML_OPT_CANCEL;
+ }
+ }
+ // ggml_graph_reset (gf);
+ ggml_set_f32 (f->grad, 1.0f);
+ ggml_graph_compute(gb, cplan);
+ ggml_opt_acc_grad(np, ps, g, accum_norm);
+ *fx += ggml_get_f32_1d(f, 0);
+ }
+ *fx *= accum_norm;
- *fx = ggml_get_f32_1d(f, 0);
}
++count;
(*step) *= width;
}
- return GGML_LINESEARCH_FAIL;
+ GGML_UNREACHABLE();
}
static enum ggml_opt_result ggml_opt_lbfgs(
float * pf = params.past > 0 ? opt->lbfgs.pf->data : NULL; // past function values
+ const int n_accum = MAX(1, params.n_gradient_accumulation);
+ const float accum_norm = 1.0f / (float) n_accum;
+
float fx = 0.0f; // cost function value
float xnorm = 0.0f; // ||x||
float gnorm = 0.0f; // ||g||
float * lm_s = opt->lbfgs.lms->data;
float * lm_y = opt->lbfgs.lmy->data;
- if (callback) {
- // LBFG-S does not support learning rate -> ignore learning schedule
- float sched = 0;
- callback(callback_data, &sched);
- }
+ bool cancel = false;
// evaluate the function value and its gradient
{
ggml_opt_set_params(np, ps, x);
- ggml_graph_reset (gf);
- ggml_set_f32 (f->grad, 1.0f);
-
- ggml_graph_compute(gb, &cplan);
-
- ggml_opt_get_grad(np, ps, g);
-
- fx = ggml_get_f32_1d(f, 0);
+ fx = 0;
+ memset(g, 0, sizeof(float)*nx);
+ for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
+ if (callback) {
+                // L-BFGS does not support a learning rate -> ignore the learning schedule
+ float sched = 0;
+ callback(callback_data, accum_step, &sched, &cancel);
+ if (cancel) {
+ return GGML_OPT_CANCEL;
+ }
+ }
+ // ggml_graph_reset (gf);
+ ggml_set_f32 (f->grad, 1.0f);
+ ggml_graph_compute(gb, &cplan);
+ ggml_opt_acc_grad(np, ps, g, accum_norm);
+ fx += ggml_get_f32_1d(f, 0);
+ }
+ fx *= accum_norm;
opt->loss_before = fx;
opt->loss_after = fx;
ggml_vec_cpy_f32(nx, xp, x);
ggml_vec_cpy_f32(nx, gp, g);
- ls = linesearch_backtracking(¶ms, nx, x, &fx, g, d, step, xp, f, gf, gb, &cplan, np, ps, callback, callback_data);
+ // TODO: instead of passing &cancel here, use the return code of the linesearch
+ // to determine if the optimization should be cancelled
+ // this is a simple change, but not doing this atm, since I don't have a nice
+ // way to test and don't want to break something with so many changes lined up
+ ls = linesearch_backtracking(¶ms, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
+ if (cancel) {
+ return GGML_OPT_CANCEL;
+ }
if (ls < 0) {
// linesearch failed - go back to the previous point and return
step[0] = 1.0;
}
- return GGML_OPT_DID_NOT_CONVERGE;
+ GGML_UNREACHABLE();
}
struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
case GGML_OPT_ADAM:
{
result = (struct ggml_opt_params) {
- .type = GGML_OPT_ADAM,
- .n_threads = 1,
- .past = 0,
- .delta = 1e-5f,
+ .type = GGML_OPT_ADAM,
+ .graph_size = GGML_DEFAULT_GRAPH_SIZE,
+ .n_threads = 1, // FIXME: GGML_DEFAULT_N_THREADS ?
+ .past = 0,
+ .delta = 1e-5f,
.max_no_improvement = 100,
.print_forward_graph = true,
.print_backward_graph = true,
+ .n_gradient_accumulation = 1,
+
.adam = {
.n_iter = 10000,
.sched = 1.000f,
case GGML_OPT_LBFGS:
{
result = (struct ggml_opt_params) {
- .type = GGML_OPT_LBFGS,
- .n_threads = 1,
- .past = 0,
- .delta = 1e-5f,
+ .type = GGML_OPT_LBFGS,
+ .graph_size = GGML_DEFAULT_GRAPH_SIZE,
+ .n_threads = 1,
+ .past = 0,
+ .delta = 1e-5f,
.max_no_improvement = 0,
.print_forward_graph = true,
.print_backward_graph = true,
+ .n_gradient_accumulation = 1,
+
.lbfgs = {
.m = 6,
.n_iter = 100,
opt->iter = 0;
opt->nx = nx;
opt->just_initialized = true;
+ if (opt->ctx == NULL) {
+ struct ggml_init_params ctx_opt_params;
+ if (opt->params.type == GGML_OPT_ADAM) {
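+            // adam keeps 3 work tensors of nx floats each: g, m and v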
+ ctx_opt_params.mem_size = GGML_MEM_ALIGN*3 + ggml_tensor_overhead()*3 + ggml_type_size(GGML_TYPE_F32)*nx*3;
+ if (opt->params.past > 0) {
+ ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
+ }
+ } else if (opt->params.type == GGML_OPT_LBFGS) {
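+            // l-bfgs keeps 9 work tensors: x, xp, g, gp, d (nx floats each),
+            // lmal, lmys (m floats each) and lms, lmy (nx*m floats each)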
+ ctx_opt_params.mem_size = GGML_MEM_ALIGN*9 + ggml_tensor_overhead()*9 + ggml_type_size(GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2);
+ if (opt->params.past > 0) {
+ ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
+ }
+ }
+ ctx_opt_params.mem_buffer = NULL;
+ ctx_opt_params.no_alloc = false;
+
+ opt->ctx = ggml_init(ctx_opt_params);
+ }
switch (opt->params.type) {
case GGML_OPT_ADAM:
{
- opt->adam.m = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
- opt->adam.v = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
+ opt->adam.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
+ opt->adam.m = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
+ opt->adam.v = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
opt->adam.pf = params.past > 0
- ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)
+ ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
: NULL;
ggml_set_zero(opt->adam.m);
ggml_set_zero(opt->adam.v);
} break;
case GGML_OPT_LBFGS:
{
- opt->lbfgs.x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
- opt->lbfgs.xp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
- opt->lbfgs.g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
- opt->lbfgs.gp = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
- opt->lbfgs.d = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, nx);
+ opt->lbfgs.x = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
+ opt->lbfgs.xp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
+ opt->lbfgs.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
+ opt->lbfgs.gp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
+ opt->lbfgs.d = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
opt->lbfgs.pf = params.past > 0
- ? ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.past)
+ ? ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.past)
: NULL;
- opt->lbfgs.lmal = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.lbfgs.m);
- opt->lbfgs.lmys = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, params.lbfgs.m);
- opt->lbfgs.lms = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
- opt->lbfgs.lmy = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
+ opt->lbfgs.lmal = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
+ opt->lbfgs.lmys = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, params.lbfgs.m);
+ opt->lbfgs.lms = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
+ opt->lbfgs.lmy = ggml_new_tensor_2d(opt->ctx, GGML_TYPE_F32, nx, params.lbfgs.m);
ggml_set_zero(opt->lbfgs.x);
ggml_set_zero(opt->lbfgs.xp);
ggml_set_zero(opt->lbfgs.g);
struct ggml_tensor * f) {
// build forward + backward compute graphs
- struct ggml_tensor * gfbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32)+ (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 1 : 0));
- struct ggml_tensor * gbbuf = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, sizeof(struct ggml_cgraph) / ggml_type_size(GGML_TYPE_I32)+ (sizeof(struct ggml_cgraph) % ggml_type_size(GGML_TYPE_I32) ? 1 : 0));
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx, opt->params.graph_size, true);
+ ggml_build_forward_expand(gf, f);
- struct ggml_cgraph * gf = (struct ggml_cgraph *) gfbuf->data;
- struct ggml_cgraph * gb = (struct ggml_cgraph *) gbbuf->data;
-
- *gf = ggml_build_forward (f);
- *gb = ggml_build_backward(ctx, gf, true);
+ struct ggml_cgraph * gb = ggml_graph_dup(ctx, gf);
+ ggml_build_backward_expand(ctx, gf, gb, true);
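+    // gb starts as a copy of the forward graph; the backward pass nodes are
+    // appended to it by ggml_build_backward_expand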
return ggml_opt_resume_g(ctx, opt, f, gf, gb, NULL, NULL);
}
block_q8_0 * block = (block_q8_0*)dst + start / QK8_0;
result = ggml_quantize_q8_0(src + start, block, n, n, hist);
} break;
-#ifdef GGML_USE_K_QUANTS
case GGML_TYPE_Q2_K:
{
GGML_ASSERT(start % QK_K == 0);
block_q6_K * block = (block_q6_K*)dst + start / QK_K;
result = ggml_quantize_q6_K(src + start, block, n, n, hist);
} break;
-#endif
case GGML_TYPE_F16:
{
int elemsize = sizeof(ggml_fp16_t);
};
struct gguf_header {
- uint32_t magic;
+ char magic[4];
uint32_t version;
uint64_t n_tensors; // GGUFv2
uint64_t n_kv; // GGUFv2
return n == size;
}
-// NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
-static bool gguf_fread_str_cur(FILE * file, struct gguf_str * p, size_t * offset) {
+static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) {
p->n = 0;
p->data = NULL;
return ok;
}
-static bool gguf_fread_str_v1(FILE * file, struct gguf_str * p, size_t * offset) {
- p->n = 0;
- p->data = NULL;
-
- bool ok = true;
-
- uint32_t n = 0;
- ok = ok && gguf_fread_el(file, &n, sizeof(n), offset); p->data = calloc(n + 1, 1); p->n = n;
- ok = ok && gguf_fread_el(file, p->data, p->n, offset);
-
- return ok;
-}
-
struct gguf_context * gguf_init_empty(void) {
struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context));
- ctx->header.magic = GGUF_MAGIC;
+ memcpy(ctx->header.magic, GGUF_MAGIC, sizeof(ctx->header.magic));
ctx->header.version = GGUF_VERSION;
ctx->header.n_tensors = 0;
ctx->header.n_kv = 0;
// offset from start of file
size_t offset = 0;
- uint32_t magic = 0;
+ char magic[4];
// check the magic before making allocations
{
gguf_fread_el(file, &magic, sizeof(magic), &offset);
- if (magic != GGUF_MAGIC) {
- fprintf(stderr, "%s: invalid magic number %08x\n", __func__, magic);
- fclose(file);
- return NULL;
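+        // the magic is 4 raw characters with no terminator, so it is compared
+        // byte-by-byte rather than as a 32-bit integer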
+ for (uint32_t i = 0; i < sizeof(magic); i++) {
+ if (magic[i] != GGUF_MAGIC[i]) {
+                fprintf(stderr, "%s: invalid magic characters '%c%c%c%c'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
+ fclose(file);
+ return NULL;
+ }
}
}
// read the header
{
- ctx->header.magic = magic;
+ strncpy(ctx->header.magic, magic, 4);
+
ctx->kv = NULL;
ctx->infos = NULL;
ctx->data = NULL;
ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset);
+ ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
+ ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
if (ctx->header.version == 1) {
- // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
- uint32_t n_tensors = 0;
- uint32_t n_kv = 0;
-
- ok = ok && gguf_fread_el(file, &n_tensors, sizeof(n_tensors), &offset);
- ok = ok && gguf_fread_el(file, &n_kv, sizeof(n_kv), &offset);
-
- ctx->header.n_tensors = n_tensors;
- ctx->header.n_kv = n_kv;
- } else {
- ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
- ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset);
+ fprintf(stderr, "%s: GGUFv1 is no longer supported. please use a more up-to-date version\n", __func__);
+ fclose(file);
+ gguf_free(ctx);
+ return NULL;
}
if (!ok) {
}
}
- // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
- bool (* gguf_fread_str)(FILE *, struct gguf_str *, size_t *) = gguf_fread_str_cur;
- if (ctx->header.version == 1) {
- gguf_fread_str = gguf_fread_str_v1;
- }
-
// read the kv pairs
{
ctx->kv = malloc(ctx->header.n_kv * sizeof(struct gguf_kv));
case GGUF_TYPE_ARRAY:
{
ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
-
- if (ctx->header.version == 1) {
- // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
- uint32_t n = 0;
- ok = ok && gguf_fread_el(file, &n, sizeof(n), &offset);
- kv->value.arr.n = n;
- } else {
- ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset);
- }
+ ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset);
switch (kv->value.arr.type) {
case GGUF_TYPE_UINT8:
} break;
case GGUF_TYPE_ARRAY:
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
- };
+ }
} break;
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
- };
+ }
if (!ok) {
break;
ok = ok && gguf_fread_str(file, &info->name, &offset);
ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset);
for (uint32_t j = 0; j < info->n_dims; ++j) {
- if (ctx->header.version == 1) {
- // NOTE: temporary handling of GGUFv1 >> remove after Oct 2023
- uint32_t t = 0;
- ok = ok && gguf_fread_el(file, &t, sizeof(t), &offset);
- info->ne[j] = t;
- } else {
- ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
- }
+ ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
}
ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);
return keyfound;
}
-const char * gguf_get_key(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].key.data;
+const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
+ return ctx->kv[key_id].key.data;
}
-enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].type;
+enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
+ return ctx->kv[key_id].type;
}
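+
+// the typed getters below assert that the stored kv pair has the requested type
+// before reading the corresponding union member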
-enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.arr.type;
+enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
+ return ctx->kv[key_id].value.arr.type;
}
-const void * gguf_get_arr_data(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.arr.data;
+const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
+ return ctx->kv[key_id].value.arr.data;
}
const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
struct gguf_kv * kv = &ctx->kv[key_id];
struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
return str->data;
}
-int gguf_get_arr_n(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.arr.n;
+int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
+ return ctx->kv[key_id].value.arr.n;
}
-uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.uint8;
+uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
+ return ctx->kv[key_id].value.uint8;
}
-int8_t gguf_get_val_i8(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.int8;
+int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
+ return ctx->kv[key_id].value.int8;
}
-uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.uint16;
+uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
+ return ctx->kv[key_id].value.uint16;
}
-int16_t gguf_get_val_i16(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.int16;
+int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
+ return ctx->kv[key_id].value.int16;
}
-uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.uint32;
+uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
+ return ctx->kv[key_id].value.uint32;
}
-int32_t gguf_get_val_i32(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.int32;
+int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
+ return ctx->kv[key_id].value.int32;
}
-float gguf_get_val_f32(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.float32;
+float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
+ return ctx->kv[key_id].value.float32;
}
-uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.uint64;
+uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
+ return ctx->kv[key_id].value.uint64;
}
-int64_t gguf_get_val_i64(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.int64;
+int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
+ return ctx->kv[key_id].value.int64;
}
-double gguf_get_val_f64(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.float64;
+double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
+ return ctx->kv[key_id].value.float64;
}
-bool gguf_get_val_bool(const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.bool_;
+bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
+ return ctx->kv[key_id].value.bool_;
}
-const char * gguf_get_val_str (const struct gguf_context * ctx, int i) {
- return ctx->kv[i].value.str.data;
+const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
+ GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
+ return ctx->kv[key_id].value.str.data;
}
int gguf_get_n_tensors(const struct gguf_context * ctx) {
} break;
case GGUF_TYPE_ARRAY:
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break;
- };
+ }
} break;
case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type");
- };
+ }
}
// write tensor infos
// {
// ...
//
-// struct ggml_cgraph gf = ggml_build_forward(f);
+// struct ggml_cgraph * gf = ggml_new_graph(ctx);
+// ggml_build_forward_expand(gf, f);
//
// // set the input variable and parameter values
// ggml_set_f32(x, 2.0f);
#define GGML_QNT_VERSION 2 // bump this on quantization format changes
#define GGML_QNT_VERSION_FACTOR 1000 // do not change this
-#define GGML_MAX_DIMS 4
-#define GGML_MAX_NODES 4096
-#define GGML_MAX_PARAMS 256
-#define GGML_MAX_CONTEXTS 64
-#define GGML_MAX_SRC 6
-#define GGML_MAX_NAME 64
-#define GGML_MAX_OP_PARAMS 32
-#define GGML_DEFAULT_N_THREADS 4
-
+#define GGML_MAX_DIMS 4
+#define GGML_MAX_PARAMS 1024
+#define GGML_MAX_CONTEXTS 64
+#define GGML_MAX_SRC 6
+#define GGML_MAX_NAME 64
+#define GGML_MAX_OP_PARAMS 64
+#define GGML_DEFAULT_N_THREADS 4
+#define GGML_DEFAULT_GRAPH_SIZE 2048
#if UINTPTR_MAX == 0xFFFFFFFF
#define GGML_MEM_ALIGN 4
#else
#define GGML_EXIT_SUCCESS 0
#define GGML_EXIT_ABORTED 1
-#define GGUF_MAGIC 0x46554747 // "GGUF"
-#define GGUF_VERSION 2
+#define GGUF_MAGIC "GGUF"
+
+#define GGUF_VERSION 3
#define GGUF_DEFAULT_ALIGNMENT 32
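+// with GGUF_MAGIC now a string literal, readers compare it byte-wise instead
+// of as a little-endian uint32 (editor's sketch of the intended usage):
+//
+//   char magic[4];
+//   ok = ok && gguf_fread_el(file, &magic, sizeof(magic), &offset);
+//   ok = ok && memcmp(magic, GGUF_MAGIC, sizeof(magic)) == 0;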
do { \
if (!(x)) { \
fprintf(stderr, "GGML_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
- abort(); \
+ fflush(stderr); \
+ fflush(stdout); \
+ ggml_print_backtrace(); \
+ exit(1); \
} \
} while (0)
+#ifndef NDEBUG
+#define GGML_UNREACHABLE() GGML_ASSERT(!"statement should not be reached")
+#elif defined(__GNUC__)
+#define GGML_UNREACHABLE() __builtin_unreachable()
+#else
+#define GGML_UNREACHABLE() ((void) 0)
+#endif
+
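+// e.g. marking the tail of an exhaustive switch (editor's sketch):
+//
+//   switch (op) {
+//       case GGML_OP_NONE: return "NONE";
+//       // ... one case per op, each returning ...
+//   }
+//   GGML_UNREACHABLE();
+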
// used to copy the number of elements and stride in bytes of tensors into local variables.
// main purpose is to reduce code duplication and improve readability.
//
GGML_TYPE_COUNT,
};
- enum ggml_backend {
+ enum ggml_backend_type {
GGML_BACKEND_CPU = 0,
GGML_BACKEND_GPU = 10,
GGML_BACKEND_GPU_SPLIT = 20,
GGML_OP_ALIBI,
GGML_OP_CLAMP,
GGML_OP_CONV_1D,
+ GGML_OP_CONV_1D_STAGE_0, // internal
+ GGML_OP_CONV_1D_STAGE_1, // internal
+ GGML_OP_CONV_TRANSPOSE_1D,
GGML_OP_CONV_2D,
+ GGML_OP_CONV_2D_STAGE_0, // internal
+ GGML_OP_CONV_2D_STAGE_1, // internal
GGML_OP_CONV_TRANSPOSE_2D,
GGML_OP_POOL_1D,
GGML_OP_POOL_2D,
GGML_UNARY_OP_GELU,
GGML_UNARY_OP_GELU_QUICK,
GGML_UNARY_OP_SILU,
+ GGML_UNARY_OP_LEAKY
};
enum ggml_object_type {
GGML_OBJECT_WORK_BUFFER
};
+ enum ggml_log_level {
+ GGML_LOG_LEVEL_ERROR = 2,
+ GGML_LOG_LEVEL_WARN = 3,
+ GGML_LOG_LEVEL_INFO = 4
+ };
+
// ggml object
struct ggml_object {
size_t offs;
// n-dimensional tensor
struct ggml_tensor {
- enum ggml_type type;
- enum ggml_backend backend;
+ enum ggml_type type;
+ enum ggml_backend_type backend;
+
+ struct ggml_backend_buffer * buffer;
int n_dims;
int64_t ne[GGML_MAX_DIMS]; // number of elements
size_t nb[GGML_MAX_DIMS]; // stride in bytes:
- // nb[0] = sizeof(type)
- // nb[1] = nb[0] * ne[0] + padding
+ // nb[0] = ggml_type_size(type)
+ // nb[1] = nb[0] * (ne[0] / ggml_blck_size(type)) + padding
// nb[i] = nb[i-1] * ne[i-1]
// compute data
void * extra; // extra things e.g. for ggml-cuda.cu
- char padding[4];
+ char padding[12];
};
static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
int n_threads;
- // the `n_tasks` of nodes, 1:1 mapping to cgraph nodes
- int n_tasks[GGML_MAX_NODES];
-
// abort ggml_graph_compute when true
bool (*abort_callback)(void * data);
void * abort_callback_data;
};
- // next prime after GGML_MAX_NODES
- // #define GGML_GRAPH_HASHTABLE_SIZE 4099
- // next prime after GGML_MAX_NODES * 2 (nodes + leafs)
- #define GGML_GRAPH_HASHTABLE_SIZE 8273
+ enum ggml_cgraph_eval_order {
+ GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0,
+ GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,
+ GGML_CGRAPH_EVAL_ORDER_COUNT
+ };
+
+ struct ggml_hash_set {
+ size_t size;
+ struct ggml_tensor ** keys;
+ };
// computation graph
struct ggml_cgraph {
+ int size;
int n_nodes;
int n_leafs;
- struct ggml_tensor * nodes[GGML_MAX_NODES];
- struct ggml_tensor * grads[GGML_MAX_NODES];
- struct ggml_tensor * leafs[GGML_MAX_NODES];
+ struct ggml_tensor ** nodes;
+ struct ggml_tensor ** grads;
+ struct ggml_tensor ** leafs;
-        void * visited_hash_table[GGML_GRAPH_HASHTABLE_SIZE];
+
+        struct ggml_hash_set visited_hash_table;
+        enum ggml_cgraph_eval_order order;
// performance
int perf_runs;
int64_t perf_time_us;
};
- static const size_t GGML_GRAPH_SIZE = sizeof(struct ggml_cgraph);
-
// scratch buffer
struct ggml_scratch {
size_t offs;
GGML_API int64_t ggml_cycles(void);
GGML_API int64_t ggml_cycles_per_ms(void);
+ GGML_API void ggml_print_backtrace(void);
+
GGML_API void ggml_numa_init(void); // call once for better performance on NUMA systems
GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node
GGML_API struct ggml_tensor * ggml_dup_tensor (struct ggml_context * ctx, const struct ggml_tensor * src);
GGML_API struct ggml_tensor * ggml_view_tensor(struct ggml_context * ctx, struct ggml_tensor * src);
+ // Context tensor enumeration and lookup
+ GGML_API struct ggml_tensor * ggml_get_first_tensor(struct ggml_context * ctx);
+ GGML_API struct ggml_tensor * ggml_get_next_tensor (struct ggml_context * ctx, struct ggml_tensor * tensor);
GGML_API struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * name);
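+    // e.g. enumerating every tensor in a context (editor's sketch):
+    //
+    //   for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL;
+    //           t = ggml_get_next_tensor(ctx, t)) {
+    //       printf("%s\n", t->name);
+    //   }
+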
GGML_API struct ggml_tensor * ggml_set_zero(struct ggml_tensor * tensor);
GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);
+ // Converts a flat index into coordinates
+ GGML_API void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3);
+
GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
GGML_API void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);
+ GGML_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+ GGML_API void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);
+
GGML_API float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
GGML_API void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);
+ GGML_API float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
+ GGML_API void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);
+
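+    // e.g. reading element i of a possibly non-contiguous tensor by first
+    // unraveling the flat index (editor's sketch):
+    //
+    //   int64_t i0, i1, i2, i3;
+    //   ggml_unravel_index(t, i, &i0, &i1, &i2, &i3);
+    //   const float v = ggml_get_f32_nd(t, (int) i0, (int) i1, (int) i2, (int) i3);
+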
GGML_API void * ggml_get_data (const struct ggml_tensor * tensor);
GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);
struct ggml_tensor * a,
struct ggml_tensor * b);
+ GGML_API struct ggml_tensor * ggml_add_cast(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ enum ggml_type type);
+
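+    // e.g. accumulating a gradient into quantized weights while keeping the
+    // result in f32 (editor's sketch; `w` and `dw` are hypothetical tensors):
+    //
+    //   struct ggml_tensor * acc = ggml_add_cast(ctx, w, dw, GGML_TYPE_F32);
+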
GGML_API struct ggml_tensor * ggml_add1(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_tensor * a,
struct ggml_tensor * b);
+    // sums repetitions in a into the shape of b
GGML_API struct ggml_tensor * ggml_repeat_back(
struct ggml_context * ctx,
struct ggml_tensor * a,
struct ggml_context * ctx,
struct ggml_tensor * a);
+ GGML_API struct ggml_tensor * ggml_leaky(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a);
+
GGML_API struct ggml_tensor * ggml_relu_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a);
struct ggml_tensor * b,
float eps);
- // A: n columns, m rows
- // B: n columns, p rows (i.e. we transpose it internally)
- // result is m columns, p rows
+ // A: k columns, n rows => [ne03, ne02, n, k]
+ // B: k columns, m rows (i.e. we transpose it internally) => [ne03 * x, ne02 * y, m, k]
+ // result is n columns, m rows => [ne03 * x, ne02 * y, m, n]
GGML_API struct ggml_tensor * ggml_mul_mat(
struct ggml_context * ctx,
struct ggml_tensor * a,
size_t nb1,
size_t offset);
-
// a -> b, return view(b)
GGML_API struct ggml_tensor * ggml_cpy(
struct ggml_context * ctx,
struct ggml_context * ctx,
struct ggml_tensor * a);
+ // make contiguous, with new shape
+ GGML_API struct ggml_tensor * ggml_cont_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0);
+
+ GGML_API struct ggml_tensor * ggml_cont_2d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0,
+ int64_t ne1);
+
+ GGML_API struct ggml_tensor * ggml_cont_3d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0,
+ int64_t ne1,
+ int64_t ne2);
+
+ GGML_API struct ggml_tensor * ggml_cont_4d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ int64_t ne0,
+ int64_t ne1,
+ int64_t ne2,
+ int64_t ne3);
+
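+    // e.g. flattening a permuted tensor into a contiguous matrix
+    // (editor's sketch):
+    //
+    //   struct ggml_tensor * p = ggml_permute(ctx, a, 1, 0, 2, 3);
+    //   struct ggml_tensor * m = ggml_cont_2d(ctx, p, p->ne[0], p->ne[1]*p->ne[2]*p->ne[3]);
+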
// return view(a), b specifies the new shape
// TODO: when we start computing gradient, make a copy instead of view
GGML_API struct ggml_tensor * ggml_reshape(
struct ggml_tensor * b);
// rotary position embedding
- // if mode & 1 == 1, skip n_past elements
+ // if mode & 1 == 1, skip n_past elements (DEPRECATED)
// if mode & 2 == 1, GPT-NeoX style
// if mode & 4 == 1, ChatGLM style
- // TODO: avoid creating a new tensor every time
+ //
+    // b is an int32 vector of size a->ne[2] containing the positions
GGML_API struct ggml_tensor * ggml_rope(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
int mode,
int n_ctx);
GGML_API struct ggml_tensor * ggml_rope_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
int mode,
int n_ctx);
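+    // e.g. building the positions tensor for a batch of N tokens starting at
+    // n_past (editor's sketch; assumes the context allocates tensor data):
+    //
+    //   struct ggml_tensor * pos = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, N);
+    //   for (int i = 0; i < N; ++i) {
+    //       ((int32_t *) pos->data)[i] = n_past + i;
+    //   }
+    //   cur = ggml_rope(ctx, cur, pos, n_dims, mode, n_ctx);
+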
GGML_API struct ggml_tensor * ggml_rope_custom(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
int mode,
int n_ctx,
+ int n_orig_ctx,
float freq_base,
- float freq_scale);
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow);
// in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_rope_custom_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
int mode,
int n_ctx,
+ int n_orig_ctx,
float freq_base,
- float freq_scale);
+ float freq_scale,
+ float ext_factor,
+ float attn_factor,
+ float beta_fast,
+ float beta_slow);
+
+ // compute correction dims for YaRN RoPE scaling
+    GGML_API void ggml_rope_yarn_corr_dims(
+ int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]);
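+    // e.g. calling the extended API with the YaRN terms neutralized
+    // (editor's sketch; ext_factor = 0.0f disables the interpolation and the
+    // remaining values are commonly used defaults, not mandated here):
+    //
+    //   cur = ggml_rope_custom(ctx, cur, pos, n_dims, mode, n_ctx, n_orig_ctx,
+    //                          10000.0f, 1.0f, 0.0f, 1.0f, 32.0f, 1.0f);
+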
// xPos RoPE, in-place, returns view(a)
GGML_API struct ggml_tensor * ggml_rope_xpos_inplace(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
float base,
bool down);
GGML_API struct ggml_tensor * ggml_rope_back(
struct ggml_context * ctx,
struct ggml_tensor * a,
- int n_past,
+ struct ggml_tensor * b,
int n_dims,
int mode,
int n_ctx,
// alibi position embedding
// in-place, returns view(a)
- struct ggml_tensor * ggml_alibi(
+ GGML_API struct ggml_tensor * ggml_alibi(
struct ggml_context * ctx,
struct ggml_tensor * a,
int n_past,
// clamp
// in-place, returns view(a)
- struct ggml_tensor * ggml_clamp(
+ GGML_API struct ggml_tensor * ggml_clamp(
struct ggml_context * ctx,
struct ggml_tensor * a,
float min,
int s,
int d);
+ GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+        int                   s0,  // stride
+        int                   p0,  // padding
+        int                   d0); // dilation
+
GGML_API struct ggml_tensor * ggml_conv_2d(
struct ggml_context * ctx,
struct ggml_tensor * a,
int s0, // stride
int p0); // padding
+ // the result will have 2*p0 padding for the first dimension
+ // and 2*p1 padding for the second dimension
GGML_API struct ggml_tensor * ggml_pool_2d(
struct ggml_context * ctx,
struct ggml_tensor * a,
int k1,
int s0,
int s1,
- int p0,
- int p1);
+ float p0,
+ float p1);
// nearest interpolate
// used in stable-diffusion
GGML_API void ggml_build_forward_expand (struct ggml_cgraph * cgraph, struct ggml_tensor * tensor);
GGML_API void ggml_build_backward_expand(struct ggml_context * ctx, struct ggml_cgraph * gf, struct ggml_cgraph * gb, bool keep);
- GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor);
- GGML_API struct ggml_cgraph ggml_build_backward(struct ggml_context * ctx, struct ggml_cgraph * gf, bool keep);
-
// graph allocation in a context
- GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx);
- GGML_API struct ggml_cgraph * ggml_build_forward_ctx(struct ggml_context * ctx, struct ggml_tensor * tensor);
+ GGML_API struct ggml_cgraph * ggml_new_graph (struct ggml_context * ctx); // size = GGML_DEFAULT_GRAPH_SIZE, grads = false
+ GGML_API struct ggml_cgraph * ggml_new_graph_custom (struct ggml_context * ctx, size_t size, bool grads);
+ GGML_API struct ggml_cgraph * ggml_graph_dup (struct ggml_context * ctx, struct ggml_cgraph * cgraph);
+ GGML_API struct ggml_cgraph * ggml_graph_view (struct ggml_context * ctx, struct ggml_cgraph * cgraph, int i0, int i1);
+ GGML_API void ggml_graph_cpy (struct ggml_cgraph * src, struct ggml_cgraph * dst);
+ GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph); // zero grads
+ GGML_API void ggml_graph_clear (struct ggml_cgraph * cgraph);
+
GGML_API size_t ggml_graph_overhead(void);
+ GGML_API size_t ggml_graph_overhead_custom(size_t size, bool grads);
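+    // e.g. sizing a no-alloc context that holds only a custom-sized graph
+    // (editor's sketch):
+    //
+    //   struct ggml_init_params params = {
+    //       /*.mem_size   =*/ ggml_graph_overhead_custom(4096, false),
+    //       /*.mem_buffer =*/ NULL,
+    //       /*.no_alloc   =*/ true,
+    //   };
+    //   struct ggml_context * ctx = ggml_init(params);
+    //   struct ggml_cgraph  * gf  = ggml_new_graph_custom(ctx, 4096, false);
+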
// ggml_graph_plan() has to be called before ggml_graph_compute()
// when plan.work_size > 0, caller must allocate memory for plan.work_data
GGML_API struct ggml_cplan ggml_graph_plan (struct ggml_cgraph * cgraph, int n_threads /*= GGML_DEFAULT_N_THREADS*/);
- GGML_API int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
- GGML_API void ggml_graph_reset (struct ggml_cgraph * cgraph);
+ GGML_API int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan);
// same as ggml_graph_compute() but the work data is allocated as a part of the context
// note: the drawback of this API is that you must have ensured that the context has enough memory for the work data
GGML_API struct ggml_tensor * ggml_graph_get_tensor(struct ggml_cgraph * cgraph, const char * name);
- GGML_API void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname);
- GGML_API struct ggml_cgraph ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval);
+ GGML_API void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname);
+ GGML_API struct ggml_cgraph * ggml_graph_import(const char * fname, struct ggml_context ** ctx_data, struct ggml_context ** ctx_eval);
// print info and performance information for the graph
GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);
// dump the graph into a file using the dot format
GGML_API void ggml_graph_dump_dot(const struct ggml_cgraph * gb, const struct ggml_cgraph * gf, const char * filename);
+ // build gradient checkpointing backward graph gb for gf using provided checkpoints
+    // gb_tmp will contain the original backward graph with rewritten backward process nodes,
+ // but without the second forward pass nodes.
+ GGML_API void ggml_build_backward_gradient_checkpointing(
+ struct ggml_context * ctx,
+ struct ggml_cgraph * gf,
+ struct ggml_cgraph * gb,
+ struct ggml_cgraph * gb_tmp,
+ struct ggml_tensor * * checkpoints,
+ int n_checkpoints);
//
// optimization
//
GGML_OPT_NO_CONTEXT,
GGML_OPT_INVALID_WOLFE,
GGML_OPT_FAIL,
+ GGML_OPT_CANCEL,
GGML_LINESEARCH_FAIL = -128,
GGML_LINESEARCH_MINIMUM_STEP,
GGML_LINESEARCH_INVALID_PARAMETERS,
};
- typedef void (*ggml_opt_callback)(void * data, float * sched);
+ typedef void (*ggml_opt_callback)(void * data, int accum_step, float * sched, bool * cancel);
+ typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
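+    // e.g. an opt callback that requests cancellation (editor's sketch;
+    // `my_state` is a hypothetical user struct passed as `data`):
+    //
+    //   static void my_opt_cb(void * data, int accum_step, float * sched, bool * cancel) {
+    //       struct my_state * st = (struct my_state *) data;
+    //       if (st->should_stop) {
+    //           *cancel = true; // ggml_opt returns GGML_OPT_CANCEL
+    //       }
+    //   }
+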
// optimization parameters
//
struct ggml_opt_params {
enum ggml_opt_type type;
+ size_t graph_size;
+
int n_threads;
// delta-based convergence test
bool print_forward_graph;
bool print_backward_graph;
+ int n_gradient_accumulation;
+
// ADAM parameters
struct {
int n_iter;
float loss_after;
struct {
+ struct ggml_tensor * g; // current gradient
struct ggml_tensor * m; // first moment
struct ggml_tensor * v; // second moment
struct ggml_tensor * pf; // past function values
// quantization
//
+ // TODO: these would probably get removed in favor of the more general ggml_quantize_chunk
GGML_API size_t ggml_quantize_q4_0(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q4_1(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q5_0(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q5_1(const float * src, void * dst, int n, int k, int64_t * hist);
GGML_API size_t ggml_quantize_q8_0(const float * src, void * dst, int n, int k, int64_t * hist);
+ GGML_API size_t ggml_quantize_q2_K(const float * src, void * dst, int n, int k, int64_t * hist);
+ GGML_API size_t ggml_quantize_q3_K(const float * src, void * dst, int n, int k, int64_t * hist);
+ GGML_API size_t ggml_quantize_q4_K(const float * src, void * dst, int n, int k, int64_t * hist);
+ GGML_API size_t ggml_quantize_q5_K(const float * src, void * dst, int n, int k, int64_t * hist);
+ GGML_API size_t ggml_quantize_q6_K(const float * src, void * dst, int n, int k, int64_t * hist);
+
GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);
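+    // e.g. quantizing n contiguous f32 values starting at offset 0
+    // (editor's sketch; hist collects per-bucket counts and is assumed
+    // non-NULL here):
+    //
+    //   int64_t hist[16] = {0};
+    //   const size_t n_bytes = ggml_quantize_chunk(GGML_TYPE_Q4_0, src, dst, 0, n, hist);
+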
//
GGML_API int gguf_get_n_kv(const struct gguf_context * ctx);
GGML_API int gguf_find_key(const struct gguf_context * ctx, const char * key);
- GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int i);
-
- GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int i);
- GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int i);
-
- // results are undefined if the wrong type is used for the key
- GGML_API uint8_t gguf_get_val_u8 (const struct gguf_context * ctx, int i);
- GGML_API int8_t gguf_get_val_i8 (const struct gguf_context * ctx, int i);
- GGML_API uint16_t gguf_get_val_u16 (const struct gguf_context * ctx, int i);
- GGML_API int16_t gguf_get_val_i16 (const struct gguf_context * ctx, int i);
- GGML_API uint32_t gguf_get_val_u32 (const struct gguf_context * ctx, int i);
- GGML_API int32_t gguf_get_val_i32 (const struct gguf_context * ctx, int i);
- GGML_API float gguf_get_val_f32 (const struct gguf_context * ctx, int i);
- GGML_API uint64_t gguf_get_val_u64 (const struct gguf_context * ctx, int i);
- GGML_API int64_t gguf_get_val_i64 (const struct gguf_context * ctx, int i);
- GGML_API double gguf_get_val_f64 (const struct gguf_context * ctx, int i);
- GGML_API bool gguf_get_val_bool(const struct gguf_context * ctx, int i);
- GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int i);
- GGML_API int gguf_get_arr_n (const struct gguf_context * ctx, int i);
- GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int i);
+ GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int key_id);
+
+ GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int key_id);
+ GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id);
+
+ // will abort if the wrong type is used for the key
+ GGML_API uint8_t gguf_get_val_u8 (const struct gguf_context * ctx, int key_id);
+ GGML_API int8_t gguf_get_val_i8 (const struct gguf_context * ctx, int key_id);
+ GGML_API uint16_t gguf_get_val_u16 (const struct gguf_context * ctx, int key_id);
+ GGML_API int16_t gguf_get_val_i16 (const struct gguf_context * ctx, int key_id);
+ GGML_API uint32_t gguf_get_val_u32 (const struct gguf_context * ctx, int key_id);
+ GGML_API int32_t gguf_get_val_i32 (const struct gguf_context * ctx, int key_id);
+ GGML_API float gguf_get_val_f32 (const struct gguf_context * ctx, int key_id);
+ GGML_API uint64_t gguf_get_val_u64 (const struct gguf_context * ctx, int key_id);
+ GGML_API int64_t gguf_get_val_i64 (const struct gguf_context * ctx, int key_id);
+ GGML_API double gguf_get_val_f64 (const struct gguf_context * ctx, int key_id);
+ GGML_API bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id);
+ GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int key_id);
+ GGML_API int gguf_get_arr_n (const struct gguf_context * ctx, int key_id);
+ GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id);
GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int key_id, int i);
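+    // e.g. probing for an optional string KV without tripping the asserts
+    // (editor's sketch; "general.architecture" is only an example key):
+    //
+    //   const int kid = gguf_find_key(ctx, "general.architecture");
+    //   if (kid >= 0 && gguf_get_kv_type(ctx, kid) == GGUF_TYPE_STRING) {
+    //       const char * arch = gguf_get_val_str(ctx, kid);
+    //   }
+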
GGML_API int gguf_get_n_tensors (const struct gguf_context * ctx);
enum ggml_type vec_dot_type;
} ggml_type_traits_t;
- ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);
+ GGML_API ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);
#ifdef __cplusplus
}
//#define WHISPER_USE_FLASH_ATTN
//#define WHISPER_USE_FLASH_FF
#define WHISPER_MAX_DECODERS 16
+#define WHISPER_MAX_NODES 4096
//
// ggml helpers
auto & meta = allocr.meta;
auto & data = allocr.data;
- meta.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead());
+    meta.resize(ggml_tensor_overhead()*WHISPER_MAX_NODES + ggml_graph_overhead_custom(WHISPER_MAX_NODES, false));
alloc = ggml_allocr_new_measure(tensor_alignment);
struct ggml_context * ctx0 = ggml_init(params);
- ggml_cgraph * gf = ggml_new_graph(ctx0);
+ ggml_cgraph * gf = ggml_new_graph_custom(ctx0, WHISPER_MAX_NODES, false);
ggml_allocr * alloc = wstate.alloc_encode.alloc;
struct ggml_context * ctx0 = ggml_init(params);
- ggml_cgraph * gf = ggml_new_graph(ctx0);
+ ggml_cgraph * gf = ggml_new_graph_custom(ctx0, WHISPER_MAX_NODES, false);
ggml_allocr * alloc = wstate.alloc_decode.alloc;
/*.encoder_begin_callback =*/ nullptr,
/*.encoder_begin_callback_user_data =*/ nullptr,
- /*.abort_callback =*/ nullptr,
- /*.abort_callback_user_data =*/ nullptr,
-
/*.logits_filter_callback =*/ nullptr,
/*.logits_filter_callback_user_data =*/ nullptr,
};
// initial prompt
if (!params.prompt_tokens && params.initial_prompt) {
- prompt_tokens.resize(2048);
+ prompt_tokens.resize(1024);
prompt_tokens.resize(whisper_tokenize(ctx, params.initial_prompt, prompt_tokens.data(), prompt_tokens.size()));
params.prompt_tokens = prompt_tokens.data();
params.prompt_n_tokens = prompt_tokens.size();
// b: N*N*sizeof(float)
// c: N*N*sizeof(float)
// when F16 is used, there is an extra work buffer of size N*N*sizeof(float)
- std::vector<uint8_t> buf(3llu*N_max*N_max*sizeof(float) + 3*ggml_tensor_overhead());
+ std::vector<uint8_t> buf(3llu*N_max*N_max*sizeof(float) + 3*ggml_tensor_overhead() + ggml_graph_overhead());
std::vector<uint8_t> work;
// put a bunch of random data in the buffer
struct ggml_tensor * c = ggml_mul_mat(ctx0, a, b);
- struct ggml_cgraph gf = ggml_build_forward(c);
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+
+ ggml_build_forward_expand(gf, c);
double tsum = 0.0;
// heat-up
- ggml_graph_compute_helper(work, &gf, n_threads, nullptr , nullptr);
+ ggml_graph_compute_helper(work, gf, n_threads, nullptr, nullptr);
for (int i = 0; i < n_max; ++i) {
const int64_t t0 = ggml_time_us();
- ggml_graph_compute_helper(work, &gf, n_threads, nullptr, nullptr);
+ ggml_graph_compute_helper(work, gf, n_threads, nullptr, nullptr);
const int64_t t1 = ggml_time_us();