-#include "llama-util.h"
#include "llama.h"
#include "ggml.h"
+
+#include "ggml-alloc.h"
+
#ifdef GGML_USE_CUBLAS
-#include "ggml-cuda.h"
+# include "ggml-cuda.h"
+#elif defined(GGML_USE_CLBLAST)
+# include "ggml-opencl.h"
+#endif
+
+#ifdef GGML_USE_METAL
+# include "ggml-metal.h"
+#endif
+#ifdef GGML_USE_MPI
+# include "ggml-mpi.h"
+#endif
+#ifdef GGML_USE_K_QUANTS
+# ifndef QK_K
+# ifdef GGML_QKK_64
+# define QK_K 64
+# else
+# define QK_K 256
+# endif
+# endif
+#endif
+
+#ifdef __has_include
+ #if __has_include(<unistd.h>)
+ #include <unistd.h>
+ #if defined(_POSIX_MAPPED_FILES)
+ #include <sys/mman.h>
+ #endif
+ #if defined(_POSIX_MEMLOCK_RANGE)
+ #include <sys/resource.h>
+ #endif
+ #endif
#endif
+#if defined(_WIN32)
+ #define WIN32_LEAN_AND_MEAN
+ #ifndef NOMINMAX
+ #define NOMINMAX
+ #endif
+ #include <windows.h>
+ #include <io.h>
+ #include <stdio.h> // for _fseeki64
+#endif
+
+#include <algorithm>
#include <array>
-#include <ctime>
+#include <cassert>
#include <cinttypes>
+#include <climits>
+#include <cstdarg>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <ctime>
#include <fstream>
-#include <random>
+#include <initializer_list>
#include <map>
-#include <unordered_map>
-#include <queue>
-#include <cassert>
-#include <cstring>
-#include <climits>
#include <memory>
-#include <algorithm>
-#include <initializer_list>
-#include <thread>
-#include <atomic>
#include <mutex>
-#include <sstream>
#include <numeric>
+#include <queue>
+#include <random>
+#include <regex>
+#include <sstream>
+#include <thread>
+#include <unordered_map>
-#define LLAMA_USE_SCRATCH
-#define LLAMA_MAX_SCRATCH_BUFFERS 16
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
-// available llama models
-enum e_model {
- MODEL_UNKNOWN,
- MODEL_7B,
- MODEL_13B,
- MODEL_30B,
- MODEL_65B,
-};
+#ifdef __GNUC__
+#ifdef __MINGW32__
+#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
+#else
+#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
+#endif
+#else
+#define LLAMA_ATTRIBUTE_FORMAT(...)
+#endif
+//
+// logging
+//
-static const size_t MB = 1024*1024;
+LLAMA_ATTRIBUTE_FORMAT(2, 3)
+static void llama_log_internal        (llama_log_level level, const char* format, ...);
+static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data);
-// computed for n_ctx == 2048
-// TODO: dynamically determine these sizes
-// needs modifications in ggml
+#define LLAMA_LOG_INFO(...) llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__)
+#define LLAMA_LOG_WARN(...) llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__)
+#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__)
-static const std::map<e_model, size_t> & MEM_REQ_SCRATCH0()
-{
- static std::map<e_model, size_t> k_sizes = {
- { MODEL_7B, 512ull * MB },
- { MODEL_13B, 512ull * MB },
- { MODEL_30B, 512ull * MB },
- { MODEL_65B, 1024ull * MB },
- };
- return k_sizes;
-}
+//
+// helpers
+//
-static const std::map<e_model, size_t> & MEM_REQ_SCRATCH1()
-{
- static std::map<e_model, size_t> k_sizes = {
- { MODEL_7B, 512ull * MB },
- { MODEL_13B, 512ull * MB },
- { MODEL_30B, 512ull * MB },
- { MODEL_65B, 1024ull * MB },
- };
- return k_sizes;
+static size_t utf8_len(char src) {
+ const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
+ uint8_t highbits = static_cast<uint8_t>(src) >> 4;
+ return lookup[highbits];
}
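+// for illustration: 'a' (0x61) has high nibble 0x6 -> 1 byte, a 3-byte UTF-8 lead
+// byte such as 0xE2 has high nibble 0xE -> lookup[0xE] == 3; continuation bytes
+// (0x80..0xBF) map to 1, so malformed input still advances by at least one byte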
-// 2*n_embd*n_ctx*n_layer*sizeof(float16)
-static const std::map<e_model, size_t> & MEM_REQ_KV_SELF()
-{
- static std::map<e_model, size_t> k_sizes = {
- { MODEL_7B, 1026ull * MB },
- { MODEL_13B, 1608ull * MB },
- { MODEL_30B, 3124ull * MB },
- { MODEL_65B, 5120ull * MB },
- };
- return k_sizes;
-}
-
-// this is mostly needed for temporary mul_mat buffers to dequantize the data
-// not actually needed if BLAS is disabled
-static const std::map<e_model, size_t> & MEM_REQ_EVAL()
-{
- static std::map<e_model, size_t> k_sizes = {
- { MODEL_7B, 768ull * MB },
- { MODEL_13B, 1024ull * MB },
- { MODEL_30B, 1280ull * MB },
- { MODEL_65B, 1536ull * MB },
- };
- return k_sizes;
+void replace_all(std::string & s, const std::string & search, const std::string & replace) {
+ std::string result;
+ for (size_t pos = 0; ; pos += search.length()) {
+ auto new_pos = s.find(search, pos);
+ if (new_pos == std::string::npos) {
+ result += s.substr(pos, s.size() - pos);
+ break;
+ }
+ result += s.substr(pos, new_pos - pos) + replace;
+ pos = new_pos;
+ }
+ s = std::move(result);
}
+#ifdef GGML_USE_CPU_HBM
+#include <hbwmalloc.h>
+#endif
-// default hparams (LLaMA 7B)
-struct llama_hparams {
- uint32_t n_vocab = 32000;
- uint32_t n_ctx = 512; // this is provided as user input?
- uint32_t n_embd = 4096;
- uint32_t n_mult = 256;
- uint32_t n_head = 32;
- uint32_t n_layer = 32;
- uint32_t n_rot = 64;
- enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16;
-
- bool operator!=(const llama_hparams & other) const {
- return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams)));
+static void zeros(std::ofstream & file, size_t n) {
+ char zero = 0;
+ for (size_t i = 0; i < n; ++i) {
+ file.write(&zero, 1);
}
-};
+}
-struct llama_layer {
- // normalization
- struct ggml_tensor * attention_norm;
+LLAMA_ATTRIBUTE_FORMAT(1, 2)
+static std::string format(const char * fmt, ...) {
+ va_list ap;
+ va_list ap2;
+ va_start(ap, fmt);
+ va_copy(ap2, ap);
+ int size = vsnprintf(NULL, 0, fmt, ap);
+ GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
+ std::vector<char> buf(size + 1);
+ int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
+ GGML_ASSERT(size2 == size);
+ va_end(ap2);
+ va_end(ap);
+ return std::string(buf.data(), size);
+}
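+// e.g. format("blk.%d.%s", 2, "attn_norm") returns the std::string "blk.2.attn_norm"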
- // attention
- struct ggml_tensor * wq;
- struct ggml_tensor * wk;
- struct ggml_tensor * wv;
- struct ggml_tensor * wo;
+//
+// gguf constants (sync with gguf.py)
+//
- // normalization
- struct ggml_tensor * ffn_norm;
+enum llm_arch {
+ LLM_ARCH_LLAMA,
+ LLM_ARCH_FALCON,
+ LLM_ARCH_BAICHUAN,
+ LLM_ARCH_GPT2,
+ LLM_ARCH_GPTJ,
+ LLM_ARCH_GPTNEOX,
+ LLM_ARCH_MPT,
+ LLM_ARCH_UNKNOWN,
+};
- // ff
- struct ggml_tensor * w1;
- struct ggml_tensor * w2;
- struct ggml_tensor * w3;
+static std::map<llm_arch, std::string> LLM_ARCH_NAMES = {
+    { LLM_ARCH_LLAMA,    "llama"    },
+    { LLM_ARCH_FALCON,   "falcon"   },
+    { LLM_ARCH_GPT2,     "gpt2"     },
+    { LLM_ARCH_GPTJ,     "gptj"     },
+    { LLM_ARCH_GPTNEOX,  "gptneox"  },
+    { LLM_ARCH_MPT,      "mpt"      },
+    { LLM_ARCH_BAICHUAN, "baichuan" },
};
-struct llama_kv_cache {
- struct ggml_tensor * k;
- struct ggml_tensor * v;
+enum llm_kv {
+ LLM_KV_GENERAL_ARCHITECTURE,
+ LLM_KV_GENERAL_QUANTIZATION_VERSION,
+ LLM_KV_GENERAL_ALIGNMENT,
+ LLM_KV_GENERAL_NAME,
+ LLM_KV_GENERAL_AUTHOR,
+ LLM_KV_GENERAL_URL,
+ LLM_KV_GENERAL_DESCRIPTION,
+ LLM_KV_GENERAL_LICENSE,
+ LLM_KV_GENERAL_SOURCE_URL,
+ LLM_KV_GENERAL_SOURCE_HF_REPO,
+
+ LLM_KV_CONTEXT_LENGTH,
+ LLM_KV_EMBEDDING_LENGTH,
+ LLM_KV_BLOCK_COUNT,
+ LLM_KV_FEED_FORWARD_LENGTH,
+ LLM_KV_USE_PARALLEL_RESIDUAL,
+ LLM_KV_TENSOR_DATA_LAYOUT,
+
+ LLM_KV_ATTENTION_HEAD_COUNT,
+ LLM_KV_ATTENTION_HEAD_COUNT_KV,
+ LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
+ LLM_KV_ATTENTION_CLAMP_KQV,
+ LLM_KV_ATTENTION_LAYERNORM_EPS,
+ LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
+
+ LLM_KV_ROPE_DIMENSION_COUNT,
+ LLM_KV_ROPE_FREQ_BASE,
+ LLM_KV_ROPE_SCALE_LINEAR,
+
+ LLM_KV_TOKENIZER_MODEL,
+ LLM_KV_TOKENIZER_LIST,
+ LLM_KV_TOKENIZER_TOKEN_TYPE,
+ LLM_KV_TOKENIZER_SCORES,
+ LLM_KV_TOKENIZER_MERGES,
+ LLM_KV_TOKENIZER_BOS_ID,
+ LLM_KV_TOKENIZER_EOS_ID,
+ LLM_KV_TOKENIZER_UNK_ID,
+ LLM_KV_TOKENIZER_SEP_ID,
+ LLM_KV_TOKENIZER_PAD_ID,
+ LLM_KV_TOKENIZER_HF_JSON,
+ LLM_KV_TOKENIZER_RWKV,
+};
- struct ggml_context * ctx = NULL;
+static std::map<llm_kv, std::string> LLM_KV_NAMES = {
+ { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" },
+ { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" },
+ { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" },
+ { LLM_KV_GENERAL_NAME, "general.name" },
+ { LLM_KV_GENERAL_AUTHOR, "general.author" },
+ { LLM_KV_GENERAL_URL, "general.url" },
+ { LLM_KV_GENERAL_DESCRIPTION, "general.description" },
+ { LLM_KV_GENERAL_LICENSE, "general.license" },
+ { LLM_KV_GENERAL_SOURCE_URL, "general.source_url" },
+ { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source_hf_repo" },
+
+ { LLM_KV_CONTEXT_LENGTH, "%s.context_length" },
+ { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" },
+ { LLM_KV_BLOCK_COUNT, "%s.block_count" },
+ { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" },
+ { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" },
+ { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" },
+
+ { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
+ { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
+ { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" },
+ { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" },
+ { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" },
+ { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" },
+
+ { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" },
+ { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" },
+ { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" },
+
+ { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" },
+ { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" },
+ { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" },
+ { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" },
+ { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" },
+ { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" },
+ { LLM_KV_TOKENIZER_EOS_ID, "tokenizer.ggml.eos_token_id" },
+ { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" },
+ { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" },
+ { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" },
+ { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
+ { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
+};
- llama_ctx_buffer buf;
+struct LLM_KV {
+ LLM_KV(llm_arch arch) : arch(arch) {}
- int n; // number of tokens currently in the cache
+ llm_arch arch;
- ~llama_kv_cache() {
- if (ctx) {
- ggml_free(ctx);
- }
+ std::string operator()(llm_kv kv) const {
+ return ::format(LLM_KV_NAMES[kv].c_str(), LLM_ARCH_NAMES[arch].c_str());
}
};
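+// example: LLM_KV(LLM_ARCH_LLAMA)(LLM_KV_CONTEXT_LENGTH) yields "llama.context_length";
+// the arch name is substituted into the "%s" placeholder of the key names above, while
+// keys without a placeholder ("general.*", "tokenizer.*") pass through unchanged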
-struct llama_model {
- e_model type = MODEL_UNKNOWN;
-
- llama_hparams hparams;
-
- struct ggml_tensor * tok_embeddings;
+enum llm_tensor {
+ LLM_TENSOR_TOKEN_EMBD,
+ LLM_TENSOR_POS_EMBD,
+ LLM_TENSOR_OUTPUT,
+ LLM_TENSOR_OUTPUT_NORM,
+ LLM_TENSOR_ROPE_FREQS,
+ LLM_TENSOR_ATTN_Q,
+ LLM_TENSOR_ATTN_K,
+ LLM_TENSOR_ATTN_V,
+ LLM_TENSOR_ATTN_QKV,
+ LLM_TENSOR_ATTN_OUT,
+ LLM_TENSOR_ATTN_NORM,
+ LLM_TENSOR_ATTN_NORM_2,
+ LLM_TENSOR_ATTN_ROT_EMBD,
+ LLM_TENSOR_FFN_GATE,
+ LLM_TENSOR_FFN_DOWN,
+ LLM_TENSOR_FFN_UP,
+ LLM_TENSOR_FFN_NORM,
+};
- struct ggml_tensor * norm;
- struct ggml_tensor * output;
+static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
+ {
+ LLM_ARCH_LLAMA,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
+ {
+ LLM_ARCH_BAICHUAN,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
+ {
+ LLM_ARCH_FALCON,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
+ {
+ LLM_ARCH_GPT2,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ },
+ },
+ {
+ LLM_ARCH_GPTJ,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ },
+ },
+ {
+ LLM_ARCH_GPTNEOX,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
+ {
+ LLM_ARCH_MPT,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ },
+ },
+ {
+ LLM_ARCH_UNKNOWN,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ },
+ },
+};
- std::vector<llama_layer> layers;
+static llm_arch llm_arch_from_string(const std::string & name) {
+ for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
+ if (kv.second == name) {
+ return kv.first;
+ }
+ }
- // context
- struct ggml_context * ctx = NULL;
+ return LLM_ARCH_UNKNOWN;
+}
- // key + value cache for the self attention
- // TODO: move to llama_state
- struct llama_kv_cache kv_self;
+// helper to handle gguf constants
+// usage:
+//
+// const auto tn = LLM_TN(LLM_ARCH_LLAMA);
+//
+// std::string name = tn(LLM_TENSOR_OUTPUT); -> "output"
+// std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias"); -> "token_embd.bias"
+// std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight"
+//
+struct LLM_TN {
+ LLM_TN(llm_arch arch) : arch(arch) {}
- // the model memory buffer
- llama_ctx_buffer buf;
+ llm_arch arch;
- // model memory mapped file
- std::unique_ptr<llama_mmap> mapping;
+ std::string operator()(llm_tensor tensor) const {
+ return LLM_TENSOR_NAMES[arch].at(tensor);
+ }
- // objects representing data potentially being locked in memory
- llama_mlock mlock_buf;
- llama_mlock mlock_mmap;
+ std::string operator()(llm_tensor tensor, const std::string & suffix) const {
+ return LLM_TENSOR_NAMES[arch].at(tensor) + "." + suffix;
+ }
- // for quantize-stats only
- std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
+ std::string operator()(llm_tensor tensor, int bid) const {
+ return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid);
+ }
- ~llama_model() {
- if (ctx) {
- ggml_free(ctx);
- }
+ std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
+ return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid) + "." + suffix;
}
};
-struct llama_vocab {
- using id = int32_t;
- using token = std::string;
+//
+// gguf helpers
+//
- struct token_score {
- token tok;
- float score;
- };
+#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
+{ \
+ const std::string skey(key); \
+ const int kid = gguf_find_key(ctx, skey.c_str()); \
+ if (kid >= 0) { \
+ enum gguf_type ktype = gguf_get_kv_type(ctx, kid); \
+ if (ktype != (type)) { \
+ throw std::runtime_error(format("key %s has wrong type: %s", skey.c_str(), gguf_type_name(ktype))); \
+ } \
+ (dst) = func(ctx, kid); \
+ } else if (req) { \
+ throw std::runtime_error(format("key not found in model: %s", skey.c_str())); \
+ } \
+}
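+// illustrative use (variable names are hypothetical): read an optional u32,
+// throwing only if the key is present but has the wrong type:
+//
+//   uint32_t n_embd = 0;
+//   GGUF_GET_KEY(ctx, n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "llama.embedding_length");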
- std::unordered_map<token, id> token_to_id;
- std::vector<token_score> id_to_token;
-};
+//
+// ggml helpers
+//
-struct llama_context {
- std::mt19937 rng;
+static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
+ struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
- int64_t t_load_us = 0;
- int64_t t_start_us = 0;
- bool has_evaluated_once = false;
+ if (plan.work_size > 0) {
+ buf.resize(plan.work_size);
+ plan.work_data = buf.data();
+ }
- int64_t t_sample_us = 0;
- int64_t t_eval_us = 0;
- int64_t t_p_eval_us = 0;
+ ggml_graph_compute(graph, &plan);
+}
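+// sketch of intended use, assuming a previously built graph `gf` and the context's
+// reusable work buffer:  ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);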
- int32_t n_sample = 0; // number of tokens sampled
- int32_t n_eval = 0; // number of eval calls
- int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
+//
+// llama helpers
+//
- llama_model model;
- llama_vocab vocab;
+#ifdef GGML_USE_CUBLAS
+# define llama_host_malloc(n) ggml_cuda_host_malloc(n)
+# define llama_host_free(data) ggml_cuda_host_free(data)
+#elif defined(GGML_USE_METAL)
+# define llama_host_malloc(n) ggml_metal_host_malloc(n)
+# define llama_host_free(data) ggml_metal_host_free(data)
+#elif defined(GGML_USE_CPU_HBM)
+# define llama_host_malloc(n) hbw_malloc(n)
+# define llama_host_free(data) if (data != NULL) hbw_free(data)
+#else
+# define llama_host_malloc(n) malloc(n)
+# define llama_host_free(data) free(data)
+#endif
- size_t mem_per_token = 0;
+#if defined(_WIN32)
+static std::string llama_format_win_err(DWORD err) {
+ LPSTR buf;
+ size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
+ if (!size) {
+ return "FormatMessageA failed";
+ }
+ std::string ret(buf, size);
+ LocalFree(buf);
+ return ret;
+}
+#endif
- // decode output (2-dimensional array: [n_tokens][n_vocab])
- std::vector<float> logits;
- bool logits_all = false;
+struct llama_buffer {
+ void * data = NULL;
+ size_t size = 0;
- // input embedding (1-dimensional array: [n_embd])
- std::vector<float> embedding;
+ // fallback to malloc / free
+    // useful in cases where CUDA fails to allocate PINNED memory
+ bool fallback = false;
- // memory buffers used to evaluate the model
- // TODO: move in llama_state
- llama_ctx_buffer buf_compute;
- llama_ctx_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS];
+ void resize(size_t n) {
+ llama_host_free(data);
- int buf_last = 0;
- size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 };
+ data = llama_host_malloc(n);
+ if (!data) {
+ fallback = true;
+ data = malloc(n);
+ } else {
+ fallback = false;
+ }
- void use_buf(struct ggml_context * ctx, int i) {
-#if defined(LLAMA_USE_SCRATCH)
- size_t last_size = 0;
+ GGML_ASSERT(data);
+ size = n;
+ }
- if (i == -1) {
- last_size = ggml_set_scratch(ctx, { 0, 0, nullptr, });
- } else {
- auto & buf = buf_scratch[i];
- last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.addr, });
+ ~llama_buffer() {
+ if (data) {
+ if (fallback) { // NOLINT
+ free(data);
+ } else {
+ llama_host_free(data);
+ }
}
- if (buf_last >= 0) {
- buf_max_size[buf_last] = std::max(buf_max_size[buf_last], last_size);
+ data = NULL;
+ }
+};
+
+struct llama_file {
+ // use FILE * so we don't have to re-open the file to mmap
+ FILE * fp;
+ size_t size;
+
+ llama_file(const char * fname, const char * mode) {
+ fp = std::fopen(fname, mode);
+ if (fp == NULL) {
+ throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
}
+ seek(0, SEEK_END);
+ size = tell();
+ seek(0, SEEK_SET);
+ }
- buf_last = i;
+ size_t tell() const {
+#ifdef _WIN32
+ __int64 ret = _ftelli64(fp);
#else
- (void) i;
- (void) ctx;
+ long ret = std::ftell(fp);
#endif
+ GGML_ASSERT(ret != -1); // this really shouldn't fail
+ return (size_t) ret;
}
- size_t get_buf_max_mem(int i) const {
-#if defined(LLAMA_USE_SCRATCH)
- return buf_max_size[i];
+ void seek(size_t offset, int whence) const {
+#ifdef _WIN32
+ int ret = _fseeki64(fp, (__int64) offset, whence);
#else
- (void) i;
- return 0;
+ int ret = std::fseek(fp, (long) offset, whence);
#endif
+ GGML_ASSERT(ret == 0); // same
}
-};
-template <typename T>
-static T checked_mul(T a, T b) {
- T ret = a * b;
- if (a != 0 && ret / a != b) {
- throw format("overflow multiplying %llu * %llu",
- (unsigned long long) a, (unsigned long long) b);
+ void read_raw(void * ptr, size_t len) const {
+ if (len == 0) {
+ return;
+ }
+ errno = 0;
+ std::size_t ret = std::fread(ptr, len, 1, fp);
+ if (ferror(fp)) {
+ throw std::runtime_error(format("read error: %s", strerror(errno)));
+ }
+ if (ret != 1) {
+ throw std::runtime_error(std::string("unexpectedly reached end of file"));
+ }
}
- return ret;
-}
-static std::string llama_format_tensor_shape(const std::vector<uint32_t> & ne) {
- char buf[256];
- snprintf(buf, sizeof(buf), "%5u", ne.at(0));
- for (size_t i = 1; i < ne.size(); i++) {
- snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i));
+ uint32_t read_u32() const {
+ uint32_t ret;
+ read_raw(&ret, sizeof(ret));
+ return ret;
}
- return buf;
-}
-static size_t llama_calc_tensor_size(const std::vector<uint32_t> & ne, enum ggml_type type) {
- size_t size = ggml_type_size(type);
- for (uint32_t dim : ne) {
- size = checked_mul<size_t>(size, dim);
+ void write_raw(const void * ptr, size_t len) const {
+ if (len == 0) {
+ return;
+ }
+ errno = 0;
+ size_t ret = std::fwrite(ptr, len, 1, fp);
+ if (ret != 1) {
+ throw std::runtime_error(format("write error: %s", strerror(errno)));
+ }
}
- return size / ggml_blck_size(type);
-}
-
-struct llama_load_tensor_shard {
- std::vector<uint32_t> ne;
- size_t size;
- enum ggml_type type;
- size_t file_idx;
- size_t file_off;
- void calc_size() {
- size = llama_calc_tensor_size(ne, type);
+ void write_u32(std::uint32_t val) const {
+ write_raw(&val, sizeof(val));
}
-};
-enum llama_split_type {
- SPLIT_NONE,
- SPLIT_BY_COLUMNS,
- SPLIT_BY_ROWS
+ ~llama_file() {
+ if (fp) {
+ std::fclose(fp);
+ }
+ }
};
-struct llama_load_tensor {
- std::vector<llama_load_tensor_shard> shards;
-
- std::string name;
- enum ggml_type type = GGML_TYPE_F32;
- llama_split_type split_type = SPLIT_NONE;
- std::vector<uint32_t> ne;
+struct llama_mmap {
+ void * addr;
size_t size;
- struct ggml_tensor * ggml_tensor = NULL;
- uint8_t * data;
-
- llama_load_tensor(const std::string & name) : name(name) {}
- void calc_all() {
- calc_type();
- calc_split_type();
- calc_ne();
- calc_size();
- }
+ llama_mmap(const llama_mmap &) = delete;
- void calc_type() {
- const auto & first_shard = shards.at(0);
- for (const auto & shard : shards) {
- if (shard.type != first_shard.type) {
- throw format("inconsistent tensor shard type in '%s'", name.c_str());
- }
- }
- type = first_shard.type;
- }
+#ifdef _POSIX_MAPPED_FILES
+ static constexpr bool SUPPORTED = true;
- void calc_split_type() {
- if (shards.at(0).ne.size() == 1 || // 1D tensors are just duplicated in every file
- shards.size() == 1) { // only one file?
- split_type = SPLIT_NONE;
- } else if (name.find("tok_embeddings.") == 0 ||
- name.find(".attention.wo.weight") != std::string::npos ||
- name.find(".feed_forward.w2.weight") != std::string::npos) {
- split_type = SPLIT_BY_COLUMNS;
- } else {
- split_type = SPLIT_BY_ROWS;
+ llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
+ size = file->size;
+ int fd = fileno(file->fp);
+ int flags = MAP_SHARED;
+ // prefetch/readahead impairs performance on NUMA systems
+ if (numa) { prefetch = 0; }
+#ifdef __linux__
+ if (prefetch) { flags |= MAP_POPULATE; }
+#endif
+ addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
+ if (addr == MAP_FAILED) {
+ throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
}
- }
- void calc_ne() {
- const auto & first_shard = shards.at(0);
- for (const auto & shard : shards) {
- if (shard.ne != first_shard.ne) {
- throw format("inconsistent tensor shard shape in '%s': first was %s, other was %s",
- name.c_str(), llama_format_tensor_shape(first_shard.ne).c_str(), llama_format_tensor_shape(shard.ne).c_str());
+ if (prefetch > 0) {
+ // Advise the kernel to preload the mapped memory
+ if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
+ fprintf(stderr, "warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
+ strerror(errno));
}
}
- ne = first_shard.ne;
- LLAMA_ASSERT(shards.size() <= UINT32_MAX);
- uint32_t n_shards = (uint32_t) shards.size();
- switch (split_type) {
- case SPLIT_NONE:
- ne = first_shard.ne;
- break;
- case SPLIT_BY_COLUMNS:
- ne = {checked_mul<uint32_t>(first_shard.ne[0], n_shards),
- first_shard.ne[1]};
- break;
- case SPLIT_BY_ROWS:
- ne = {first_shard.ne[0],
- checked_mul<uint32_t>(first_shard.ne[1], n_shards)};
- break;
+ if (numa) {
+ // advise the kernel not to use readahead
+ // (because the next page might not belong on the same node)
+ if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
+ fprintf(stderr, "warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
+ strerror(errno));
+ }
}
}
- void calc_size() {
- size = llama_calc_tensor_size(ne, type);
+ ~llama_mmap() {
+ munmap(addr, size);
}
-};
+#elif defined(_WIN32)
+ static constexpr bool SUPPORTED = true;
-struct llama_load_tensors_map {
- // tensors is kept in a separate vector to preserve file order
- std::vector<llama_load_tensor> tensors;
- std::unordered_map<std::string, size_t> name_to_idx;
-};
+ llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) {
+ (void) numa;
-enum llama_file_version {
- LLAMA_FILE_VERSION_GGML,
- LLAMA_FILE_VERSION_GGMF_V1, // added version field and scores in vocab
- LLAMA_FILE_VERSION_GGJT_V1, // added padding
- LLAMA_FILE_VERSION_GGJT_V2, // changed quantization format
- LLAMA_FILE_VERSION_GGJT_V3, // changed Q4 and Q8 quantization format
-};
+ size = file->size;
-struct llama_file_loader {
- llama_file file;
- llama_file_version file_version;
- llama_hparams hparams;
- llama_vocab vocab;
+ HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
- llama_file_loader(const char * fname, size_t file_idx, llama_load_tensors_map & tensors_map)
- : file(fname, "rb") {
- fprintf(stderr, "llama.cpp: loading model from %s\n", fname);
- read_magic();
- read_hparams();
- read_vocab();
- read_tensor_metadata(file_idx, tensors_map);
- }
- void read_magic() {
- uint32_t magic = file.read_u32();
+ HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
+ DWORD error = GetLastError();
- if (magic == LLAMA_FILE_MAGIC_GGML) {
- file_version = LLAMA_FILE_VERSION_GGML;
- return;
+ if (hMapping == NULL) {
+ throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
}
- uint32_t version = file.read_u32();
+ addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
+ error = GetLastError();
+ CloseHandle(hMapping);
- switch (magic) {
- case LLAMA_FILE_MAGIC_GGMF:
- switch (version) {
- case 1: file_version = LLAMA_FILE_VERSION_GGMF_V1; return;
- }
- break;
- case LLAMA_FILE_MAGIC_GGJT:
- switch (version) {
- case 1: file_version = LLAMA_FILE_VERSION_GGJT_V1; return;
- case 2: file_version = LLAMA_FILE_VERSION_GGJT_V2; return;
- case 3: file_version = LLAMA_FILE_VERSION_GGJT_V3; return;
- }
+ if (addr == NULL) {
+ throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
}
- throw format("unknown (magic, version) combination: %08x, %08x; is this really a GGML file?",
- magic, version);
- }
- void read_hparams() {
- hparams.n_vocab = file.read_u32();
- hparams.n_embd = file.read_u32();
- hparams.n_mult = file.read_u32();
- hparams.n_head = file.read_u32();
- hparams.n_layer = file.read_u32();
- hparams.n_rot = file.read_u32();
- hparams.ftype = (enum llama_ftype) file.read_u32();
- }
- void read_vocab() {
- vocab.id_to_token.resize(hparams.n_vocab);
-
- for (uint32_t i = 0; i < hparams.n_vocab; i++) {
- uint32_t len = file.read_u32();
- std::string word = file.read_string(len);
-
- float score = 0.0f;
- if (file_version >= LLAMA_FILE_VERSION_GGMF_V1) {
- file.read_raw(&score, sizeof(score));
- }
-
- vocab.token_to_id[word] = i;
-
- auto & tok_score = vocab.id_to_token[i];
- tok_score.tok = std::move(word);
- tok_score.score = score;
- }
- }
- void read_tensor_metadata(size_t file_idx, llama_load_tensors_map & tensors_map) {
- while (file.tell() < file.size) {
- llama_load_tensor_shard shard;
- uint32_t n_dims = file.read_u32();
- uint32_t name_len = file.read_u32();
- shard.type = (enum ggml_type) file.read_u32();
- shard.ne.resize(n_dims);
- file.read_raw(shard.ne.data(), sizeof(shard.ne[0]) * n_dims);
- std::string name = file.read_string(name_len);
- if (n_dims < 1 || n_dims > 2) {
- throw format("llama.cpp: tensor '%s' should not be %u-dimensional", name.c_str(), n_dims);
- }
- switch (shard.type) {
- case GGML_TYPE_F32:
- case GGML_TYPE_F16:
- case GGML_TYPE_Q4_0:
- case GGML_TYPE_Q4_1:
- case GGML_TYPE_Q5_0:
- case GGML_TYPE_Q5_1:
- case GGML_TYPE_Q8_0:
- break;
- default: {
- throw format("unrecognized tensor type %u\n", shard.type);
+ if (prefetch) {
+ // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
+ BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
+ HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
+
+ // may fail on pre-Windows 8 systems
+ pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));
+
+ if (pPrefetchVirtualMemory) {
+ // advise the kernel to preload the mapped memory
+ WIN32_MEMORY_RANGE_ENTRY range;
+ range.VirtualAddress = addr;
+ range.NumberOfBytes = (SIZE_T)size;
+ if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
+ fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
+ llama_format_win_err(GetLastError()).c_str());
}
}
+ }
+ }
- if (file_version >= LLAMA_FILE_VERSION_GGJT_V1) {
- // skip to the next multiple of 32 bytes
- file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
- }
- shard.file_idx = file_idx;
- shard.file_off = file.tell();
+ ~llama_mmap() {
+ if (!UnmapViewOfFile(addr)) {
+ fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
+ llama_format_win_err(GetLastError()).c_str());
+ }
+ }
+#else
+ static constexpr bool SUPPORTED = false;
- shard.calc_size();
- file.seek(shard.size, SEEK_CUR);
+ llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) {
+ (void) file;
+ (void) prefetch;
+ (void) numa;
- auto it = tensors_map.name_to_idx.find(name);
- size_t idx;
- if (it != tensors_map.name_to_idx.end()) {
- idx = it->second;
- } else {
- tensors_map.tensors.emplace_back(name);
- idx = tensors_map.tensors.size() - 1;
- tensors_map.name_to_idx.emplace(name, idx);
- }
- tensors_map.tensors.at(idx).shards.push_back(shard);
- }
+ throw std::runtime_error(std::string("mmap not supported"));
}
+#endif
};
-struct llama_file_saver {
- llama_file file;
- llama_file_loader * any_file_loader;
- llama_file_saver(const char * fname, llama_file_loader * any_file_loader, enum llama_ftype new_ftype)
- : file(fname, "wb"), any_file_loader(any_file_loader) {
- fprintf(stderr, "llama.cpp: saving model to %s\n", fname);
- write_magic();
- write_hparams(new_ftype);
- write_vocab();
- }
- void write_magic() {
- file.write_u32(LLAMA_FILE_MAGIC); // magic
- file.write_u32(LLAMA_FILE_VERSION); // version
- }
- void write_hparams(enum llama_ftype new_ftype) {
- const llama_hparams & hparams = any_file_loader->hparams;
- file.write_u32(hparams.n_vocab);
- file.write_u32(hparams.n_embd);
- file.write_u32(hparams.n_mult);
- file.write_u32(hparams.n_head);
- file.write_u32(hparams.n_layer);
- file.write_u32(hparams.n_rot);
- file.write_u32(new_ftype);
- }
- void write_vocab() {
- if (any_file_loader->file_version == LLAMA_FILE_VERSION_GGML) {
- fprintf(stderr, "llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n");
- }
- uint32_t n_vocab = any_file_loader->hparams.n_vocab;
- for (uint32_t i = 0; i < n_vocab; i++) {
- const auto & token_score = any_file_loader->vocab.id_to_token.at(i);
- file.write_u32((uint32_t) token_score.tok.size());
- file.write_raw(token_score.tok.data(), token_score.tok.size());
- file.write_raw(&token_score.score, sizeof(token_score.score));
- }
- }
- void write_tensor(llama_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) {
- switch (new_type) {
- case GGML_TYPE_F32:
- case GGML_TYPE_F16:
- case GGML_TYPE_Q4_0:
- case GGML_TYPE_Q4_1:
- case GGML_TYPE_Q5_0:
- case GGML_TYPE_Q5_1:
- case GGML_TYPE_Q8_0:
- break;
- default: LLAMA_ASSERT(false);
- }
- file.write_u32((uint32_t) tensor.ne.size());
- file.write_u32((uint32_t) tensor.name.size());
- file.write_u32(new_type);
- file.write_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * tensor.ne.size());
- file.write_raw(tensor.name.data(), tensor.name.size());
- file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR);
- LLAMA_ASSERT(new_size == llama_calc_tensor_size(tensor.ne, new_type));
- file.write_raw(new_data, new_size);
+// Represents some region of memory being locked using mlock or VirtualLock;
+// will automatically unlock on destruction.
+struct llama_mlock {
+ void * addr = NULL;
+ size_t size = 0;
+
+ bool failed_already = false;
+
+ llama_mlock() {}
+ llama_mlock(const llama_mlock &) = delete;
+
+ ~llama_mlock() {
+ if (size) {
+ raw_unlock(addr, size);
+ }
}
-};
-struct llama_model_loader {
- std::vector<std::unique_ptr<llama_file_loader>> file_loaders;
- llama_load_tensors_map tensors_map;
- bool use_mmap;
- size_t num_ggml_tensors_created = 0;
- struct ggml_context * ggml_ctx = NULL;
- std::unique_ptr<llama_mmap> mapping;
+ void init(void * ptr) {
+ GGML_ASSERT(addr == NULL && size == 0); // NOLINT
+ addr = ptr;
+ }
- llama_model_loader(const std::string & fname_base, bool use_mmap, bool vocab_only) {
- auto * first_file = new llama_file_loader(fname_base.c_str(), 0, tensors_map);
- file_loaders.emplace_back(first_file);
- uint32_t n_parts = vocab_only ? 1 : guess_n_parts();
- for (uint32_t i = 1; i < n_parts; i++) {
- std::string fname = fname_base + "." + std::to_string(i);
- auto * ith_file = new llama_file_loader(fname.c_str(), i, tensors_map);
- file_loaders.emplace_back(ith_file);
- if (ith_file->hparams != first_file->hparams) {
- throw format("llama.cpp: hparams inconsistent between files");
+ void grow_to(size_t target_size) {
+ GGML_ASSERT(addr);
+ if (failed_already) {
+ return;
+ }
+ size_t granularity = lock_granularity();
+ target_size = (target_size + granularity - 1) & ~(granularity - 1);
+ if (target_size > size) {
+ if (raw_lock((uint8_t *) addr + size, target_size - size)) {
+ size = target_size;
+ } else {
+ failed_already = true;
}
}
- if (!llama_mmap::SUPPORTED) {
- use_mmap = false;
- }
- if (use_mmap && alignment_prevents_mmap()) {
- fprintf(stderr, "llama.cpp: can't use mmap because tensors are not aligned; convert to new format to avoid this\n");
- use_mmap = false;
- }
- this->use_mmap = use_mmap;
- for (llama_load_tensor & lt : tensors_map.tensors) {
- lt.calc_all();
- }
}
- bool alignment_prevents_mmap() {
- for (const llama_load_tensor & lt : tensors_map.tensors) {
- for (const llama_load_tensor_shard & shard : lt.shards) {
- if (shard.file_off & 3) {
- return true;
- }
- }
- }
- return false;
- }
+#ifdef _POSIX_MEMLOCK_RANGE
+ static constexpr bool SUPPORTED = true;
- uint32_t guess_n_parts() const {
- auto it = tensors_map.name_to_idx.find("tok_embeddings.weight");
- if (it == tensors_map.name_to_idx.end()) {
- throw std::string("missing tok_embeddings.weight");
- }
- const llama_load_tensor & lt = tensors_map.tensors.at(it->second);
- return file_loaders.at(0)->hparams.n_embd / lt.shards.at(0).ne.at(0);
+ static size_t lock_granularity() {
+ return (size_t) sysconf(_SC_PAGESIZE);
}
- void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const {
- *ctx_size_p = *mmapped_size_p = 0;
- for (const llama_load_tensor & lt : tensors_map.tensors) {
- *ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE;
- *(use_mmap ? mmapped_size_p : ctx_size_p) += lt.size;
+ #ifdef __APPLE__
+ #define MLOCK_SUGGESTION \
+ "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
+ "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
+ #else
+ #define MLOCK_SUGGESTION \
+ "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
+ #endif
+
+ bool raw_lock(const void * addr, size_t size) const {
+ if (!mlock(addr, size)) {
+ return true;
}
- }
- struct ggml_tensor * get_tensor(const std::string & name, const std::vector<uint32_t> & ne, ggml_backend backend) {
- auto it = tensors_map.name_to_idx.find(name);
- if (it == tensors_map.name_to_idx.end()) {
- throw format("llama.cpp: tensor '%s' is missing from model", name.c_str());
+ char* errmsg = std::strerror(errno);
+ bool suggest = (errno == ENOMEM);
+
+ // Check if the resource limit is fine after all
+ struct rlimit lock_limit;
+ if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
+ suggest = false;
}
- llama_load_tensor & lt = tensors_map.tensors.at(it->second);
- if (lt.ne != ne) {
- throw format("llama.cpp: tensor '%s' has wrong shape; expected %s, got %s",
- name.c_str(), llama_format_tensor_shape(ne).c_str(), llama_format_tensor_shape(lt.ne).c_str());
+ if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
+ suggest = false;
}
- return get_tensor_for(lt, backend);
+ fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
+ size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
+ return false;
}
- struct ggml_tensor * get_tensor_for(llama_load_tensor & lt, ggml_backend backend) {
- struct ggml_tensor * tensor;
- if (lt.ne.size() == 2) {
- tensor = ggml_new_tensor_2d(ggml_ctx, lt.type, lt.ne.at(0), lt.ne.at(1));
- } else {
- LLAMA_ASSERT(lt.ne.size() == 1);
- tensor = ggml_new_tensor_1d(ggml_ctx, lt.type, lt.ne.at(0));
- }
- ggml_set_name(tensor, lt.name.c_str());
- LLAMA_ASSERT(lt.ggml_tensor == NULL); // if this fails, we called get_tensor twice on the same tensor
- tensor->backend = backend;
- lt.ggml_tensor = tensor;
- num_ggml_tensors_created++;
- return tensor;
- }
+ #undef MLOCK_SUGGESTION
- void done_getting_tensors() const {
- if (num_ggml_tensors_created != tensors_map.tensors.size()) {
- throw std::string("llama.cpp: file contained more tensors than expected");
+ static void raw_unlock(void * addr, size_t size) {
+ if (munlock(addr, size)) {
+ fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
}
}
+#elif defined(_WIN32)
+ static constexpr bool SUPPORTED = true;
- void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) {
- size_t data_size = 0;
- size_t prefetch_size = 0;
- for (const llama_load_tensor & lt : tensors_map.tensors) {
- data_size += lt.size;
- if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) {
- prefetch_size += lt.size;
- }
- }
+ static size_t lock_granularity() {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return (size_t) si.dwPageSize;
+ }
- if (use_mmap) {
- mapping.reset(new llama_mmap(&file_loaders.at(0)->file, prefetch_size));
- if (!lmlock) {
- // Don't call the callback since the actual loading will be lazy
- // and we can't measure it.
- progress_callback = NULL;
+ bool raw_lock(void * ptr, size_t len) const {
+ for (int tries = 1; ; tries++) {
+ if (VirtualLock(ptr, len)) {
+ return true;
}
- if (lmlock) {
- lmlock->init(mapping->addr);
+ if (tries == 2) {
+ fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
+ len, size, llama_format_win_err(GetLastError()).c_str());
+ return false;
}
- }
- size_t done_size = 0;
- for (llama_load_tensor & lt : tensors_map.tensors) {
- if (lt.ggml_tensor->backend != GGML_BACKEND_CPU) {
- continue;
- }
- if (progress_callback) {
- progress_callback((float) done_size / data_size, progress_callback_user_data);
+ // It failed but this was only the first try; increase the working
+ // set size and try again.
+ SIZE_T min_ws_size, max_ws_size;
+ if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
+ fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
+ llama_format_win_err(GetLastError()).c_str());
+ return false;
}
- LLAMA_ASSERT(lt.ggml_tensor); // unused tensors should have been caught by load_data already
- lt.data = (uint8_t *) lt.ggml_tensor->data;
- load_data_for(lt);
- lt.ggml_tensor->data = lt.data;
- done_size += lt.size;
- if (use_mmap && lmlock) {
- lmlock->grow_to(done_size);
+ // Per MSDN: "The maximum number of pages that a process can lock
+ // is equal to the number of pages in its minimum working set minus
+ // a small overhead."
+ // Hopefully a megabyte is enough overhead:
+ size_t increment = len + 1048576;
+ // The minimum must be <= the maximum, so we need to increase both:
+ min_ws_size += increment;
+ max_ws_size += increment;
+ if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
+ fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
+ llama_format_win_err(GetLastError()).c_str());
+ return false;
}
}
}
- void load_data_for(llama_load_tensor & lt) {
- if (use_mmap) {
- LLAMA_ASSERT(lt.shards.size() == 1);
- lt.data = (uint8_t *) mapping->addr + lt.shards.at(0).file_off;
- } else if (lt.split_type == SPLIT_NONE) {
- llama_file & file = file_loaders.at(lt.shards.at(0).file_idx)->file;
- file.seek(lt.shards.at(0).file_off, SEEK_SET);
- file.read_raw(lt.data, lt.size);
- } else if (lt.split_type == SPLIT_BY_ROWS) {
- size_t offset = 0;
- for (llama_load_tensor_shard & shard : lt.shards) {
- llama_file & file = file_loaders.at(shard.file_idx)->file;
- file.seek(shard.file_off, SEEK_SET);
- file.read_raw(lt.data + offset, shard.size);
- offset += shard.size;
- }
- LLAMA_ASSERT(offset == lt.size);
- } else if (lt.split_type == SPLIT_BY_COLUMNS) {
- // Let's load the data into temporary buffers to ensure the OS performs large loads.
- std::vector<llama_buffer> tmp_bufs(lt.shards.size());
- for (size_t i = 0; i < lt.shards.size(); i++) {
- llama_load_tensor_shard & shard = lt.shards.at(i);
- llama_file & file = file_loaders.at(shard.file_idx)->file;
- file.seek(shard.file_off, SEEK_SET);
- tmp_bufs.at(i).resize(shard.size);
- file.read_raw(tmp_bufs.at(i).addr, shard.size);
- }
- // Then reshape.
- size_t num_rows = lt.ne.at(1);
- size_t per_shard_row_size = lt.shards.at(0).size / num_rows;
- size_t out_offset = 0;
- for (size_t row = 0; row < num_rows; row++) {
- for (llama_buffer & tmp_buf : tmp_bufs) {
- memcpy(lt.data + out_offset,
- tmp_buf.addr + row * per_shard_row_size,
- per_shard_row_size);
- out_offset += per_shard_row_size;
- }
- }
- LLAMA_ASSERT(out_offset == lt.size);
- }
- if (0) {
- print_checksum(lt);
+ static void raw_unlock(void * ptr, size_t len) {
+ if (!VirtualUnlock(ptr, len)) {
+ fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
+ llama_format_win_err(GetLastError()).c_str());
}
}
+#else
+ static constexpr bool SUPPORTED = false;
- static void print_checksum(llama_load_tensor & lt) {
- uint32_t sum = 0;
- for (size_t i = 0; i < lt.size; i++) {
- uint8_t byte = lt.data[i];
- sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash
- }
- fprintf(stderr, "%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum,
- llama_format_tensor_shape(lt.ne).c_str(), lt.size);
+ static size_t lock_granularity() {
+ return (size_t) 65536;
}
-};
-
-
-//
-// kv cache
-//
-
-static bool kv_cache_init(
- const struct llama_hparams & hparams,
- struct llama_kv_cache & cache,
- ggml_type wtype,
- int n_ctx) {
- const int n_embd = hparams.n_embd;
- const int n_layer = hparams.n_layer;
-
- const int64_t n_mem = n_layer*n_ctx;
- const int64_t n_elements = n_embd*n_mem;
-
- cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
-
- struct ggml_init_params params;
- params.mem_size = cache.buf.size;
- params.mem_buffer = cache.buf.addr;
- params.no_alloc = false;
-
- cache.ctx = ggml_init(params);
-
- if (!cache.ctx) {
- fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
+ bool raw_lock(const void * addr, size_t len) const {
+ fprintf(stderr, "warning: mlock not supported on this system\n");
return false;
}
- cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
- cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
- ggml_set_name(cache.k, "cache_k");
- ggml_set_name(cache.v, "cache_v");
+ static void raw_unlock(const void * addr, size_t len) {}
+#endif
+};
- return true;
+typedef void (*offload_func_t)(struct ggml_tensor * tensor);
+
+static void llama_nop(struct ggml_tensor * tensor) { // don't offload by default
+ (void) tensor;
}
-struct llama_context_params llama_context_default_params() {
- struct llama_context_params result = {
- /*.n_ctx =*/ 512,
- /*.gpu_layers =*/ 0,
- /*.seed =*/ -1,
- /*.f16_kv =*/ true,
- /*.logits_all =*/ false,
- /*.vocab_only =*/ false,
- /*.use_mmap =*/ true,
- /*.use_mlock =*/ false,
- /*.embedding =*/ false,
- /*.progress_callback =*/ nullptr,
- /*.progress_callback_user_data =*/ nullptr,
- };
+static std::string llama_token_to_str(const struct llama_context * ctx, llama_token token) {
+ std::vector<char> result(8, 0);
+ const int n_tokens = llama_token_to_piece(ctx, token, result.data(), result.size());
+ if (n_tokens < 0) {
+ result.resize(-n_tokens);
+ int check = llama_token_to_piece(ctx, token, result.data(), result.size());
+ GGML_ASSERT(check == -n_tokens);
+ } else {
+ result.resize(n_tokens);
+ }
- return result;
+ return std::string(result.data(), result.size());
}
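+// note: llama_token_to_piece() reports a too-small buffer by returning the negated
+// required length, which is why the vector is resized to -n_tokens and the call retried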
-bool llama_mmap_supported() {
- return llama_mmap::SUPPORTED;
-}
+//
+// globals
+//
-bool llama_mlock_supported() {
- return llama_mlock::SUPPORTED;
-}
+struct llama_state {
+ // We save the log callback globally
+ llama_log_callback log_callback = llama_log_callback_default;
+ void * log_callback_user_data = nullptr;
+};
-void llama_init_backend() {
- ggml_time_init();
+static llama_state g_state;
- // needed to initialize f16 tables
- {
- struct ggml_init_params params = { 0, NULL, false };
- struct ggml_context * ctx = ggml_init(params);
- ggml_free(ctx);
- }
-}
+// available llama models
+enum e_model {
+ MODEL_UNKNOWN,
+ MODEL_3B,
+ MODEL_7B,
+ MODEL_13B,
+ MODEL_30B,
+ MODEL_34B,
+ MODEL_40B,
+ MODEL_65B,
+ MODEL_70B,
+};
-int64_t llama_time_us() {
- return ggml_time_us();
-}
+static const size_t kB = 1024;
+static const size_t MB = kB*kB;
-//
-// model loading
-//
+// default hparams (LLaMA 7B)
+struct llama_hparams {
+ uint32_t n_vocab = 32000;
+ uint32_t n_ctx_train = 2048; // the context size used during training
+ uint32_t n_ctx = 512; // the context size used during inference
+ uint32_t n_embd = 4096;
+ uint32_t n_head = 32;
+ uint32_t n_head_kv = 32;
+ uint32_t n_layer = 32;
+ uint32_t n_rot = 64;
+ uint32_t n_ff = 11008;
+
+ float f_norm_eps = 1e-5;
+ float f_norm_rms_eps = 1e-5;
+
+ float rope_freq_base = 10000.0f;
+ float rope_freq_scale = 1.0f;
-static const char *llama_file_version_name(llama_file_version version) {
- switch (version) {
- case LLAMA_FILE_VERSION_GGML: return "'ggml' (old version with low tokenizer quality and no mmap support)";
- case LLAMA_FILE_VERSION_GGMF_V1: return "ggmf v1 (old version with no mmap support)";
- case LLAMA_FILE_VERSION_GGJT_V1: return "ggjt v1 (pre #1405)";
- case LLAMA_FILE_VERSION_GGJT_V2: return "ggjt v2 (pre #1508)";
- case LLAMA_FILE_VERSION_GGJT_V3: return "ggjt v3 (latest)";
+ bool operator!=(const llama_hparams & other) const {
+ return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT
}
- return "unknown";
-}
+ uint32_t n_gqa() const {
+ return n_head/n_head_kv;
+ }
-static const char *llama_ftype_name(enum llama_ftype ftype) {
- switch (ftype) {
- case LLAMA_FTYPE_ALL_F32: return "all F32";
- case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16";
- case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
- case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
- case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
- return "mostly Q4_1, some F16";
- case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0";
- case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1";
- case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0";
- default: return "unknown, may not work";
+ uint32_t n_embd_head() const {
+ return n_embd/n_head;
}
-}
-static const char *llama_model_type_name(e_model type) {
- switch (type) {
- case MODEL_7B: return "7B";
- case MODEL_13B: return "13B";
- case MODEL_30B: return "30B";
- case MODEL_65B: return "65B";
- default: LLAMA_ASSERT(false);
+ uint32_t n_embd_gqa() const {
+ return n_embd/n_gqa();
}
-}
-static void llama_model_load_internal(
- const std::string & fname,
- llama_context & lctx,
- int n_ctx,
- int n_gpu_layers,
- ggml_type memory_type,
- bool use_mmap,
- bool use_mlock,
- bool vocab_only,
- llama_progress_callback progress_callback,
- void * progress_callback_user_data) {
+ size_t kv_size() const {
+ size_t result = 2ull;
+ result *= (size_t) n_embd_gqa();
+ result *= (size_t) n_ctx;
+ result *= (size_t) n_layer;
+ result *= sizeof(ggml_fp16_t);
+ return result;
+ }
+};
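+// with the default 7B hparams above (n_embd_gqa() == 4096, n_ctx == 512, n_layer == 32),
+// kv_size() == 2 * 4096 * 512 * 32 * sizeof(ggml_fp16_t) == 256*MB for the fp16 K/V cache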
- lctx.t_start_us = ggml_time_us();
+struct llama_layer {
+ // normalization
+ struct ggml_tensor * attn_norm;
+ struct ggml_tensor * attn_norm_b;
+ struct ggml_tensor * attn_norm_2;
+ struct ggml_tensor * attn_norm_2_b;
- std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap, vocab_only));
+ // attention
+ struct ggml_tensor * wq;
+ struct ggml_tensor * wk;
+ struct ggml_tensor * wv;
+ struct ggml_tensor * wo;
+ struct ggml_tensor * wqkv;
- lctx.vocab = std::move(ml->file_loaders.at(0)->vocab);
- auto & model = lctx.model;
- model.hparams = ml->file_loaders.at(0)->hparams;
- llama_file_version file_version = ml->file_loaders.at(0)->file_version;
- auto & hparams = model.hparams;
- uint32_t n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
+ // normalization
+ struct ggml_tensor * ffn_norm;
- {
- switch (hparams.n_layer) {
- case 32: model.type = e_model::MODEL_7B; break;
- case 40: model.type = e_model::MODEL_13B; break;
- case 60: model.type = e_model::MODEL_30B; break;
- case 80: model.type = e_model::MODEL_65B; break;
- }
+ // ff
+ struct ggml_tensor * w1; // ffn_gate
+ struct ggml_tensor * w2; // ffn_down
+ struct ggml_tensor * w3; // ffn_up
+};
- hparams.n_ctx = n_ctx;
- }
+struct llama_kv_cache {
+ struct ggml_tensor * k = NULL;
+ struct ggml_tensor * v = NULL;
- {
- fprintf(stderr, "%s: format = %s\n", __func__, llama_file_version_name(file_version));
- fprintf(stderr, "%s: n_vocab = %u\n", __func__, hparams.n_vocab);
- fprintf(stderr, "%s: n_ctx = %u\n", __func__, hparams.n_ctx);
- fprintf(stderr, "%s: n_embd = %u\n", __func__, hparams.n_embd);
- fprintf(stderr, "%s: n_mult = %u\n", __func__, hparams.n_mult);
- fprintf(stderr, "%s: n_head = %u\n", __func__, hparams.n_head);
- fprintf(stderr, "%s: n_layer = %u\n", __func__, hparams.n_layer);
- fprintf(stderr, "%s: n_rot = %u\n", __func__, hparams.n_rot);
- fprintf(stderr, "%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype));
- fprintf(stderr, "%s: n_ff = %u\n", __func__, n_ff);
- fprintf(stderr, "%s: n_parts = %zu\n", __func__, ml->file_loaders.size());
- fprintf(stderr, "%s: model size = %s\n", __func__, llama_model_type_name(model.type));
- }
+ struct ggml_context * ctx = NULL;
- if (file_version < LLAMA_FILE_VERSION_GGJT_V2) {
- if (hparams.ftype != LLAMA_FTYPE_ALL_F32 &&
- hparams.ftype != LLAMA_FTYPE_MOSTLY_F16 &&
- hparams.ftype != LLAMA_FTYPE_MOSTLY_Q8_0) {
- throw format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1405)");
- }
- }
+ llama_buffer buf;
+
+ int n; // number of tokens currently in the cache
- if (file_version < LLAMA_FILE_VERSION_GGJT_V3) {
- if (hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ||
- hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_1 ||
- hparams.ftype == LLAMA_FTYPE_MOSTLY_Q8_0) {
- throw format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1508)");
+ ~llama_kv_cache() {
+ if (ctx) {
+ ggml_free(ctx);
}
- }
- if (vocab_only) {
- return;
+#ifdef GGML_USE_CUBLAS
+ ggml_cuda_free_data(k);
+ ggml_cuda_free_data(v);
+#endif // GGML_USE_CUBLAS
}
+};
- auto & ctx = model.ctx;
-
- size_t ctx_size;
- size_t mmapped_size;
- ml->calc_sizes(&ctx_size, &mmapped_size);
- fprintf(stderr, "%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
-
- // create the ggml context
- {
- lctx.model.buf.resize(ctx_size);
- if (use_mlock) {
- lctx.model.mlock_buf.init(lctx.model.buf.addr);
- lctx.model.mlock_buf.grow_to(lctx.model.buf.size);
- }
+struct llama_vocab {
+ using id = int32_t;
+ using token = std::string;
+ using ttype = llama_token_type;
- struct ggml_init_params params = {
- /*.mem_size =*/ lctx.model.buf.size,
- /*.mem_buffer =*/ lctx.model.buf.addr,
- /*.no_alloc =*/ ml->use_mmap,
- };
+ struct token_data {
+ token text;
+ float score;
+ ttype type;
+ };
- model.ctx = ggml_init(params);
- if (!model.ctx) {
- throw format("ggml_init() failed");
- }
- }
+ enum llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM;
-#ifdef GGML_USE_CUBLAS
-#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
-#else
-#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU
-#endif
+ std::unordered_map<token, id> token_to_id;
+ std::vector<token_data> id_to_token;
- // prepare memory for the weights
- size_t vram_total = 0;
- {
- const uint32_t n_embd = hparams.n_embd;
- const uint32_t n_layer = hparams.n_layer;
- const uint32_t n_vocab = hparams.n_vocab;
+ std::map<std::pair<std::string, std::string>, int> bpe_ranks;
- ml->ggml_ctx = ctx;
+ // default LLaMA special tokens
+ id special_bos_id = 1;
+ id special_eos_id = 2;
+ id special_unk_id = 0;
+ id special_sep_id = -1;
+ id special_pad_id = -1;
- model.tok_embeddings = ml->get_tensor("tok_embeddings.weight", {n_embd, n_vocab}, GGML_BACKEND_CPU);
- model.norm = ml->get_tensor("norm.weight", {n_embd}, GGML_BACKEND_CPU);
+ id linefeed_id = 13;
- // "output" tensor
- {
- ggml_backend backend_output;
- if (n_gpu_layers > int(n_layer)) { // NOLINT
- backend_output = LLAMA_BACKEND_OFFLOAD;
- } else {
- backend_output = GGML_BACKEND_CPU;
- }
+ int find_bpe_rank(std::string token_left, std::string token_right) const {
+ replace_all(token_left, " ", "\u0120");
+ replace_all(token_left, "\n", "\u010A");
+ replace_all(token_right, " ", "\u0120");
+ replace_all(token_right, "\n", "\u010A");
- model.output = ml->get_tensor("output.weight", {n_embd, n_vocab}, backend_output);
+ auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
+ if (it == bpe_ranks.end()) {
+ return -1;
}
- const int i_gpu_start = n_layer - n_gpu_layers;
+ return it->second;
+ }
+};
- model.layers.resize(n_layer);
- for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+struct llama_model {
+ e_model type = MODEL_UNKNOWN;
+ llm_arch arch = LLM_ARCH_UNKNOWN;
+ llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
- auto & layer = model.layers[i];
+ std::string name = "n/a";
- std::string layers_i = "layers." + std::to_string(i);
+ llama_hparams hparams;
+ llama_vocab vocab;
- layer.attention_norm = ml->get_tensor(layers_i + ".attention_norm.weight", {n_embd}, backend);
+ struct ggml_tensor * tok_embeddings;
- layer.wq = ml->get_tensor(layers_i + ".attention.wq.weight", {n_embd, n_embd}, backend);
- layer.wk = ml->get_tensor(layers_i + ".attention.wk.weight", {n_embd, n_embd}, backend);
- layer.wv = ml->get_tensor(layers_i + ".attention.wv.weight", {n_embd, n_embd}, backend);
- layer.wo = ml->get_tensor(layers_i + ".attention.wo.weight", {n_embd, n_embd}, backend);
+ struct ggml_tensor * output_norm;
+ struct ggml_tensor * output_norm_b;
+ struct ggml_tensor * output;
- layer.ffn_norm = ml->get_tensor(layers_i + ".ffn_norm.weight", {n_embd}, backend);
+ std::vector<llama_layer> layers;
- layer.w1 = ml->get_tensor(layers_i + ".feed_forward.w1.weight", {n_embd, n_ff}, backend);
- layer.w2 = ml->get_tensor(layers_i + ".feed_forward.w2.weight", { n_ff, n_embd}, backend);
- layer.w3 = ml->get_tensor(layers_i + ".feed_forward.w3.weight", {n_embd, n_ff}, backend);
+ int n_gpu_layers;
- if (backend == GGML_BACKEND_GPU) {
- vram_total +=
- ggml_nbytes(layer.attention_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
- ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.attention_norm) +
- ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
- }
- }
- }
+ // context
+ struct ggml_context * ctx = NULL;
- ml->done_getting_tensors();
+ // the model memory buffer
+ llama_buffer buf;
- // print memory requirements
- {
- const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1;
+ // model memory mapped file
+ std::unique_ptr<llama_mmap> mapping;
- // this is the total memory required to run the inference
- const size_t mem_required =
- ctx_size +
- mmapped_size - vram_total + // weights in VRAM not in memory
- MEM_REQ_SCRATCH0().at(model.type) +
- MEM_REQ_SCRATCH1().at(model.type) +
- MEM_REQ_EVAL().at(model.type);
+ // objects representing data potentially being locked in memory
+ llama_mlock mlock_buf;
+ llama_mlock mlock_mmap;
- // this is the memory required by one llama_state
- const size_t mem_required_state =
- scale*MEM_REQ_KV_SELF().at(model.type);
+ // for quantize-stats only
+ std::vector<std::pair<std::string, struct ggml_tensor *>> tensors_by_name;
- fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
- mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
+ int64_t t_load_us = 0;
+ int64_t t_start_us = 0;
-#ifdef GGML_USE_CUBLAS
- const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
+ ~llama_model() {
+ if (ctx) {
+ ggml_free(ctx);
+ }
- fprintf(stderr, "%s: [cublas] offloading %d layers to GPU\n", __func__, n_gpu);
- if (n_gpu_layers > (int) hparams.n_layer) {
- fprintf(stderr, "%s: [cublas] offloading output layer to GPU\n", __func__);
+#ifdef GGML_USE_CUBLAS
+ for (size_t i = 0; i < tensors_by_name.size(); ++i) {
+ ggml_cuda_free_data(tensors_by_name[i].second);
+ }
+ ggml_cuda_free_scratch();
+#elif defined(GGML_USE_CLBLAST)
+ for (size_t i = 0; i < tensors_by_name.size(); ++i) {
+ ggml_cl_free_data(tensors_by_name[i].second);
}
- fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
-#else
- (void) n_gpu_layers;
#endif
}
+};
- // populate `tensors_by_name`
- for (llama_load_tensor & lt : ml->tensors_map.tensors) {
- model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor);
+struct llama_context {
+ llama_context(const llama_model & model) : model(model), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {}
+ ~llama_context() {
+ if (model_owner) {
+ delete &model;
+ }
+#ifdef GGML_USE_METAL
+ if (ctx_metal) {
+ ggml_metal_free(ctx_metal);
+ }
+#endif
+ if (alloc) {
+ ggml_allocr_free(alloc);
+ }
}
- ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &lctx.model.mlock_mmap : NULL);
+ std::mt19937 rng;
-#ifdef GGML_USE_CUBLAS
- {
- size_t done_size = 0;
- size_t data_size = 0;
- for (llama_load_tensor & lt : ml->tensors_map.tensors) {
- data_size += lt.size;
- if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) {
- done_size += lt.size;
- }
+ bool has_evaluated_once = false;
+
+ int64_t t_sample_us = 0;
+ int64_t t_eval_us = 0;
+ int64_t t_p_eval_us = 0;
+
+ int32_t n_sample = 0; // number of tokens sampled
+ int32_t n_eval = 0; // number of eval calls
+ int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
+
+ const llama_model & model;
+
+ bool model_owner = false;
+
+ int64_t t_load_us;
+ int64_t t_start_us;
+
+ // key + value cache for the self attention
+ struct llama_kv_cache kv_self;
+
+ // decode output (2-dimensional array: [n_tokens][n_vocab])
+ std::vector<float> logits;
+ bool logits_all = false;
+
+ // input embedding (1-dimensional array: [n_embd])
+ std::vector<float> embedding;
+
+ // reusable buffer for `struct ggml_graph_plan.work_data`
+ std::vector<uint8_t> work_buffer;
+
+ // memory buffers used to evaluate the model
+ llama_buffer buf_compute;
+
+ llama_buffer buf_alloc;
+ ggml_allocr * alloc = NULL;
+
+#ifdef GGML_USE_METAL
+ ggml_metal_context * ctx_metal = NULL;
+#endif
+
+#ifdef GGML_USE_MPI
+ ggml_mpi_context * ctx_mpi = NULL;
+#endif
+};
+
+//
+// kv cache helpers
+//
+
+static bool llama_kv_cache_init(
+ const struct llama_hparams & hparams,
+ struct llama_kv_cache & cache,
+ ggml_type wtype,
+ int n_ctx,
+ int n_gpu_layers) {
+ const int n_embd = hparams.n_embd_gqa();
+ const int n_layer = hparams.n_layer;
+
+ const int64_t n_mem = n_layer*n_ctx;
+ const int64_t n_elements = n_embd*n_mem;
+
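+    // the buffer holds one K and one V tensor of n_elements each, plus ~2 MB of headroom
+    // for ggml object overhead; e.g. a 7B-style model (n_embd_gqa = 4096, n_layer = 32)
+    // with an f16 cache at n_ctx = 512 needs 2*4096*32*512*2 bytes = 256 MB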
+ cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
+ cache.n = 0;
+
+ struct ggml_init_params params;
+ params.mem_size = cache.buf.size;
+ params.mem_buffer = cache.buf.data;
+ params.no_alloc = false;
+
+ cache.ctx = ggml_init(params);
+
+ if (!cache.ctx) {
+ LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__);
+ return false;
+ }
+
+ cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
+ cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
+ ggml_set_name(cache.k, "cache_k");
+ ggml_set_name(cache.v, "cache_v");
+
+ (void) n_gpu_layers;
+#ifdef GGML_USE_CUBLAS
+ if (n_gpu_layers > n_layer + 1) {
+ ggml_cuda_assign_buffers_no_scratch(cache.v);
+ }
+ if (n_gpu_layers > n_layer + 2) {
+ ggml_cuda_assign_buffers_no_scratch(cache.k);
+ }
+#endif // GGML_USE_CUBLAS
+
+ return true;
+}
+
+//
+// model loading and saving
+//
+
+enum llama_fver {
+ GGUF_FILE_VERSION_V1 = 1,
+ GGUF_FILE_VERSION_V2 = 2,
+};
+
+static const char * llama_file_version_name(llama_fver version) {
+ switch (version) {
+        case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until Nov 2023)";
+ case GGUF_FILE_VERSION_V2: return "GGUF V2 (latest)";
+ }
+
+ return "unknown";
+}
+
+static std::string llama_format_tensor_shape(const std::vector<int64_t> & ne) {
+ char buf[256];
+ snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
+ for (size_t i = 1; i < ne.size(); i++) {
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
+ }
+ return buf;
+}
+
+static std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
+ char buf[256];
+ snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
+ for (int i = 1; i < GGML_MAX_DIMS; i++) {
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
+ }
+ return buf;
+}
+
+struct llama_model_loader {
+ int n_kv = 0;
+ int n_tensors = 0;
+ int n_created = 0;
+
+ int64_t n_elements = 0;
+
+ bool use_mmap = false;
+
+ llama_file file;
+ llama_ftype ftype;
+ llama_fver fver;
+
+ std::unique_ptr<llama_mmap> mapping;
+
+ struct gguf_context * ctx_gguf = NULL;
+ struct ggml_context * ctx_meta = NULL;
+
+ llama_model_loader(const std::string & fname, bool use_mmap) : file(fname.c_str(), "rb") {
+ struct gguf_init_params params = {
+ /*.no_alloc = */ true,
+ /*.ctx = */ &ctx_meta,
+ };
+
+ ctx_gguf = gguf_init_from_file(fname.c_str(), params);
+ if (!ctx_gguf) {
+ throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
+ }
+
+ n_kv = gguf_get_n_kv(ctx_gguf);
+ n_tensors = gguf_get_n_tensors(ctx_gguf);
+
+ fver = (enum llama_fver ) gguf_get_version(ctx_gguf);
+
+ for (int i = 0; i < n_tensors; i++) {
+ const char * name = gguf_get_tensor_name(ctx_gguf, i);
+ struct ggml_tensor * t = ggml_get_tensor(ctx_meta, name);
+ n_elements += ggml_nelements(t);
+ }
+
+ LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
+ __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
+
+ // determine file type based on the number of tensors for each quantization and print meta data
+ // TODO: make optional
+ {
+ std::map<enum ggml_type, uint32_t> n_type;
+
+ uint32_t n_type_max = 0;
+ enum ggml_type type_max = GGML_TYPE_F32;
+
+ for (int i = 0; i < n_tensors; i++) {
+ const char * name = gguf_get_tensor_name(ctx_gguf, i);
+ struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, name);
+
+ n_type[meta->type]++;
+
+ if (n_type_max < n_type[meta->type]) {
+ n_type_max = n_type[meta->type];
+ type_max = meta->type;
+ }
+
+ LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str());
+ }
+
+ switch (type_max) {
+ case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break;
+ case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break;
+ case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break;
+ case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break;
+ case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break;
+ case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break;
+ case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break;
+ case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break;
+ case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break;
+ case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break;
+ case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break;
+ case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break;
+ default:
+ {
+ LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
+ ftype = LLAMA_FTYPE_ALL_F32;
+ } break;
+ }
+
+ // this is a way to mark that we have "guessed" the file type
+ ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
+
+ {
+ const int kid = gguf_find_key(ctx_gguf, "general.file_type");
+ if (kid >= 0) {
+ ftype = (llama_ftype) gguf_get_val_u32(ctx_gguf, kid);
+ }
+ }
+
+ for (int i = 0; i < n_kv; i++) {
+ const char * name = gguf_get_key(ctx_gguf, i);
+ const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
+
+ LLAMA_LOG_INFO("%s: - kv %3d: %42s %-8s\n", __func__, i, name, gguf_type_name(type));
+ }
+
+ // print type counts
+ for (auto & kv : n_type) {
+ if (kv.second == 0) {
+ continue;
+ }
+
+ LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
+ }
+ }
+
+ if (!llama_mmap::SUPPORTED) {
+ LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
+ use_mmap = false;
+ }
+
+ this->use_mmap = use_mmap;
+ }
+
+ ~llama_model_loader() {
+ if (ctx_gguf) {
+ gguf_free(ctx_gguf);
+ }
+ if (ctx_meta) {
+ ggml_free(ctx_meta);
+ }
+ }
+
+ std::string get_arch_name() const {
+ const auto kv = LLM_KV(LLM_ARCH_UNKNOWN);
+
+ std::string arch_name;
+ GGUF_GET_KEY(ctx_gguf, arch_name, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_GENERAL_ARCHITECTURE));
+
+ return arch_name;
+ }
+
+ enum llm_arch get_arch() const {
+ const std::string arch_name = get_arch_name();
+
+ return llm_arch_from_string(arch_name);
+ }
+
+ const char * get_tensor_name(int i) const {
+ return gguf_get_tensor_name(ctx_gguf, i);
+ }
+
+ struct ggml_tensor * get_tensor_meta(int i) const {
+ return ggml_get_tensor(ctx_meta, get_tensor_name(i));
+ }
+
+ void calc_sizes(size_t & ctx_size_p, size_t & mmapped_size_p) const {
+ ctx_size_p = 0;
+ mmapped_size_p = 0;
+
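+        // tensor metadata always lives in the ggml context; the tensor data itself is
+        // counted against the mmap size when memory-mapping, otherwise against the context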
+ for (int i = 0; i < n_tensors; i++) {
+ struct ggml_tensor * meta = get_tensor_meta(i);
+ ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE;
+ (use_mmap ? mmapped_size_p : ctx_size_p) += ggml_nbytes_pad(meta);
+ }
+ }
+
+ struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend backend) {
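+        // tensors destined for a GPU backend get no data allocated in the ggml context
+        // (the backend provides the buffer later); CPU tensors are allocated here unless
+        // the file is memory-mapped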
+ if (backend != GGML_BACKEND_CPU) {
+ ggml_set_no_alloc(ctx, true);
+ }
+
+ struct ggml_tensor * tensor = ggml_dup_tensor(ctx, meta);
+ tensor->backend = backend; // TODO: ggml_set_backend
+ ggml_set_name(tensor, ggml_get_name(meta));
+
+ if (backend != GGML_BACKEND_CPU) {
+ ggml_set_no_alloc(ctx, use_mmap);
+ }
+
+ n_created++;
+
+ return tensor;
+ }
+
+ struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend backend) {
+ struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
+
+ if (cur == NULL) {
+ throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
+ }
+
+ {
+ bool is_ok = true;
+ for (size_t i = 0; i < ne.size(); ++i) {
+ if (ne[i] != cur->ne[i]) {
+ is_ok = false;
+ break;
+ }
+ }
+ if (!is_ok) {
+ throw std::runtime_error(
+ format("%s: tensor '%s' has wrong shape; expected %s, got %s",
+ __func__, name.c_str(),
+ llama_format_tensor_shape(ne).c_str(),
+ llama_format_tensor_shape(cur).c_str()));
+ }
+ }
+
+ return create_tensor_for(ctx, cur, backend);
+ }
+
+ void done_getting_tensors() const {
+ if (n_created != n_tensors) {
+ throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
+ }
+ }
+
+ size_t file_offset(const char * name) const {
+ const int idx = gguf_find_tensor(ctx_gguf, name);
+
+ if (idx < 0) {
+ throw std::runtime_error(format("%s: tensor '%s' not found in the file", __func__, name));
+ }
+
+ return gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, idx);
+ }
+
+ void load_data_for(struct ggml_tensor * cur) const {
+ const size_t offs = file_offset(ggml_get_name(cur));
+
+ if (use_mmap) {
+ cur->data = (uint8_t *) mapping->addr + offs;
+ } else {
+ file.seek(offs, SEEK_SET);
+ file.read_raw(cur->data, ggml_nbytes(cur));
+ }
+ }
+
+ void load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) {
+ size_t size_data = 0;
+ size_t size_lock = 0;
+ size_t size_pref = 0; // prefetch
+
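+        // first pass: total number of bytes to load and how much of the file to prefetch
+        // when memory-mapping (only the tensors that stay on the CPU are prefetched)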
+ for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
+ struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
+ size_data += ggml_nbytes(cur);
+ if (cur->backend == GGML_BACKEND_CPU) {
+ size_pref += ggml_nbytes(cur);
+ }
+ }
+
+ if (use_mmap) {
+ mapping.reset(new llama_mmap(&file, size_pref, ggml_is_numa()));
+ if (lmlock) {
+ lmlock->init(mapping->addr);
+ }
+ }
+
+ size_t done_size = 0;
+ for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
+ struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
+ GGML_ASSERT(cur); // unused tensors should have been caught by load_data already
+
+ if (progress_callback) {
+ progress_callback((float) done_size / size_data, progress_callback_user_data);
+ }
+
+ // allocate temp buffer if not using mmap
+ if (!use_mmap && cur->data == NULL) {
+ GGML_ASSERT(cur->backend != GGML_BACKEND_CPU);
+ #ifdef GGML_USE_CPU_HBM
+ cur->data = (uint8_t*)hbw_malloc(ggml_nbytes(cur));
+ #else
+ cur->data = (uint8_t*)malloc(ggml_nbytes(cur));
+ #endif
+ }
+
+ load_data_for(cur);
+
+ switch (cur->backend) {
+ case GGML_BACKEND_CPU:
+ if (use_mmap && lmlock) {
+ size_lock += ggml_nbytes(cur);
+ lmlock->grow_to(size_lock);
+ }
+ break;
+#if defined(GGML_USE_CUBLAS)
+ case GGML_BACKEND_GPU:
+ case GGML_BACKEND_GPU_SPLIT:
+ // old code:
+ //ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor);
+
+ // TODO: test if this works !!
+ ggml_cuda_transform_tensor(cur->data, cur);
+ if (!use_mmap) {
+ free(cur->data);
+ }
+ break;
+#elif defined(GGML_USE_CLBLAST)
+ case GGML_BACKEND_GPU:
+ ggml_cl_transform_tensor(cur->data, cur);
+ if (!use_mmap) {
+ free(cur->data);
+ }
+ break;
+#endif
+ default:
+ continue;
+ }
+
+ done_size += ggml_nbytes(cur);
+ }
+ }
+};
+
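+// Typical flow (sketch): construct a llama_model_loader, create tensors into a ggml
+// context with create_tensor(), call done_getting_tensors() to verify that every tensor
+// in the file was consumed, then load_all_data() to read or mmap the actual weights.
+// llm_load_tensors() below follows this sequence.
+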
+//
+// load LLaMA models
+//
+
+std::string llama_model_ftype_name(enum llama_ftype ftype) {
+ if (ftype & LLAMA_FTYPE_GUESSED) {
+ return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
+ }
+
+ switch (ftype) {
+ case LLAMA_FTYPE_ALL_F32: return "all F32";
+ case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16";
+ case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
+ case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
+        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16: return "mostly Q4_1, some F16";
+ case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0";
+ case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1";
+ case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0";
+
+ // K-quants
+ case LLAMA_FTYPE_MOSTLY_Q2_K: return "mostly Q2_K";
+ case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "mostly Q3_K - Small";
+ case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "mostly Q3_K - Medium";
+ case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "mostly Q3_K - Large";
+ case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "mostly Q4_K - Small";
+ case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "mostly Q4_K - Medium";
+ case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "mostly Q5_K - Small";
+ case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "mostly Q5_K - Medium";
+ case LLAMA_FTYPE_MOSTLY_Q6_K: return "mostly Q6_K";
+
+ default: return "unknown, may not work";
+ }
+}
+
+static const char * llama_model_type_name(e_model type) {
+ switch (type) {
+ case MODEL_3B: return "3B";
+ case MODEL_7B: return "7B";
+ case MODEL_13B: return "13B";
+ case MODEL_30B: return "30B";
+ case MODEL_34B: return "34B";
+ case MODEL_40B: return "40B";
+ case MODEL_65B: return "65B";
+ case MODEL_70B: return "70B";
+ default: return "?B";
+ }
+}
+
+static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
+ model.arch = ml.get_arch();
+ if (model.arch == LLM_ARCH_UNKNOWN) {
+ throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
+ }
+}
+
+static void llm_load_hparams(
+ llama_model_loader & ml,
+ llama_model & model,
+ int n_ctx,
+ float rope_freq_base,
+ float rope_freq_scale) {
+ struct gguf_context * ctx = ml.ctx_gguf;
+
+ const auto kv = LLM_KV(model.arch);
+
+ auto & hparams = model.hparams;
+
+ // get general kv
+ GGUF_GET_KEY(ctx, model.name, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_GENERAL_NAME));
+
+ // get hparams kv
+ GGUF_GET_KEY(ctx, hparams.n_vocab, gguf_get_arr_n, GGUF_TYPE_ARRAY, true, kv(LLM_KV_TOKENIZER_LIST));
+ GGUF_GET_KEY(ctx, hparams.n_ctx_train, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_CONTEXT_LENGTH));
+ GGUF_GET_KEY(ctx, hparams.n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_EMBEDDING_LENGTH));
+ GGUF_GET_KEY(ctx, hparams.n_ff, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_FEED_FORWARD_LENGTH));
+ GGUF_GET_KEY(ctx, hparams.n_head, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_ATTENTION_HEAD_COUNT));
+ GGUF_GET_KEY(ctx, hparams.n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_BLOCK_COUNT));
+
+ // n_head_kv is optional, default to n_head
+ hparams.n_head_kv = hparams.n_head;
+ GGUF_GET_KEY(ctx, hparams.n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ATTENTION_HEAD_COUNT_KV));
+
+    // TODO: manually setting rope freq base and scale should always override the model values
+    // FIXME: the check below is only a partial fix: it works when the specified param is not the
+    //        default value, but it cannot override the model value back to the params default
+
+ llama_context_params defaults = llama_context_default_params();
+
+ // rope_freq_base
+ {
+ float ropebase = 10000.0f;
+ GGUF_GET_KEY(ctx, ropebase, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_FREQ_BASE));
+ if (ropebase != 10000.0f && rope_freq_base == defaults.rope_freq_base) {
+ rope_freq_base = ropebase;
+ }
+ }
+
+ // rope_freq_scale (inverse of the kv) is optional
+ {
+ float ropescale = 1.0f;
+ GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR));
+ if (ropescale != 1.0f && rope_freq_scale == defaults.rope_freq_scale) {
+ rope_freq_scale = 1.0f/ropescale;
+ }
+ }
+
+ // sanity check for n_rot (optional)
+ {
+ hparams.n_rot = hparams.n_embd / hparams.n_head;
+
+ GGUF_GET_KEY(ctx, hparams.n_rot, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ROPE_DIMENSION_COUNT));
+
+ if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) {
+ if (hparams.n_rot != hparams.n_embd / hparams.n_head) {
+ throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd / hparams.n_head));
+ }
+ }
+ // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
+ // gpt-j n_rot = rotary_dim
+ }
+
+ // arch-specific KVs
+ switch (model.arch) {
+ case LLM_ARCH_LLAMA:
+ {
+ GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
+
+ switch (hparams.n_layer) {
+ case 26: model.type = e_model::MODEL_3B; break;
+ case 32: model.type = e_model::MODEL_7B; break;
+ case 40: model.type = e_model::MODEL_13B; break;
+ case 48: model.type = e_model::MODEL_34B; break;
+ case 60: model.type = e_model::MODEL_30B; break;
+ case 80: model.type = hparams.n_head == hparams.n_head_kv ? e_model::MODEL_65B : e_model::MODEL_70B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
+ case LLM_ARCH_FALCON:
+ {
+ GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
+
+ switch (hparams.n_layer) {
+ case 32: model.type = e_model::MODEL_7B; break;
+ case 60: model.type = e_model::MODEL_40B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
+ case LLM_ARCH_BAICHUAN:
+ {
+ GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
+ switch (hparams.n_layer) {
+ case 32: model.type = e_model::MODEL_7B; break;
+ case 40: model.type = e_model::MODEL_13B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
+ default: (void)0;
+ };
+
+ model.ftype = ml.ftype;
+
+ hparams.n_ctx = n_ctx;
+ hparams.rope_freq_base = rope_freq_base;
+ hparams.rope_freq_scale = rope_freq_scale;
+}
+
+// TODO: This should probably be in llama.h
+static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos);
+static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch);
+
+static void llm_load_vocab(
+ llama_model_loader & ml,
+ llama_model & model) {
+ auto & vocab = model.vocab;
+
+ struct gguf_context * ctx = ml.ctx_gguf;
+
+ const auto kv = LLM_KV(model.arch);
+
+ const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
+ if (token_idx == -1) {
+ throw std::runtime_error("cannot find tokenizer vocab in model file\n");
+ }
+
+ const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
+ if (score_idx == -1) {
+ throw std::runtime_error("cannot find tokenizer scores in model file\n");
+ }
+
+ const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
+
+ const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
+ if (toktype_idx == -1) {
+ throw std::runtime_error("cannot find token type list in GGUF file\n");
+ }
+
+ const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
+
+ // determine vocab type
+ {
+ std::string tokenizer_name;
+
+ GGUF_GET_KEY(ctx, tokenizer_name, gguf_get_val_str, GGUF_TYPE_STRING, true, kv(LLM_KV_TOKENIZER_MODEL));
+
+ if (tokenizer_name == "llama") {
+ vocab.type = LLAMA_VOCAB_TYPE_SPM;
+
+ // default special tokens
+ vocab.special_bos_id = 1;
+ vocab.special_eos_id = 2;
+ vocab.special_unk_id = 0;
+ vocab.special_sep_id = -1;
+ vocab.special_pad_id = -1;
+ } else if (tokenizer_name == "gpt2") {
+ vocab.type = LLAMA_VOCAB_TYPE_BPE;
+
+ // read bpe merges and populate bpe ranks
+ const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
+ if (merges_keyidx == -1) {
+ throw std::runtime_error("cannot find tokenizer merges in model file\n");
+ }
+
+ const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
+
+ for (int i = 0; i < n_merges; i++) {
+ const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
+
+ std::string first;
+ std::string second;
+
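+                // each merge rule is stored as a single "<left> <right>" string;
+                // split it on the separating space to recover the pair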
+ const size_t pos = word.find(' ', 1);
+
+ if (pos != std::string::npos) {
+ first = word.substr(0, pos);
+ second = word.substr(pos + 1);
+ }
+
+ vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
+ }
+
+ // default special tokens
+ vocab.special_bos_id = 11;
+ vocab.special_eos_id = 11;
+ vocab.special_unk_id = -1;
+ vocab.special_sep_id = -1;
+ vocab.special_pad_id = -1;
+ } else {
+ LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_name.c_str());
+ LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__);
+
+ vocab.type = LLAMA_VOCAB_TYPE_SPM;
+ }
+ }
+
+ const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
+
+ vocab.id_to_token.resize(n_vocab);
+
+ for (uint32_t i = 0; i < n_vocab; i++) {
+ std::string word = gguf_get_arr_str(ctx, token_idx, i);
+
+ vocab.token_to_id[word] = i;
+
+ auto & token_data = vocab.id_to_token[i];
+ token_data.text = std::move(word);
+ token_data.score = scores[i];
+ token_data.type = (llama_token_type) toktypes[i];
+ }
+
+ // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
+ if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
+ vocab.linefeed_id = llama_byte_to_token(vocab, '\n');
+ } else {
+ vocab.linefeed_id = llama_tokenize_internal(vocab, "\n", false)[0];
+ }
+
+ // special tokens
+ GGUF_GET_KEY(ctx, vocab.special_bos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_BOS_ID));
+ GGUF_GET_KEY(ctx, vocab.special_eos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_EOS_ID));
+ GGUF_GET_KEY(ctx, vocab.special_unk_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_UNK_ID));
+ GGUF_GET_KEY(ctx, vocab.special_sep_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_SEP_ID));
+ GGUF_GET_KEY(ctx, vocab.special_pad_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_TOKENIZER_PAD_ID));
+}
+
+static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
+ const auto & hparams = model.hparams;
+ const auto & vocab = model.vocab;
+
+ // hparams
+ LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml.fver));
+ LLAMA_LOG_INFO("%s: arch = %s\n", __func__, LLM_ARCH_NAMES.at(model.arch).c_str());
+ LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, vocab.type == LLAMA_VOCAB_TYPE_SPM ? "SPM" : "BPE"); // TODO: fix
+ LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab);
+ LLAMA_LOG_INFO("%s: n_merges = %u\n", __func__, (int) vocab.bpe_ranks.size());
+ LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
+ LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx);
+ LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd);
+ LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head);
+ LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv);
+ LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer);
+ LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim
+ LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa());
+ LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_eps);
+ LLAMA_LOG_INFO("%s: f_norm_rms_eps = %.1e\n", __func__, hparams.f_norm_rms_eps);
+ LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
+ LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base);
+ LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale);
+ LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
+ LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
+ LLAMA_LOG_INFO("%s: model size = %.2f B\n", __func__, ml.n_elements*1e-9);
+
+ // general kv
+ LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
+
+ // special tokens
+ if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
+ if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
+ if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
+ if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
+ if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
+ if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
+}
+
+static void llm_load_tensors(
+ llama_model_loader & ml,
+ llama_model & model,
+ int n_batch,
+ int n_gpu_layers,
+ int main_gpu,
+ const float * tensor_split,
+ const bool mul_mat_q,
+ bool low_vram,
+ ggml_type memory_type,
+ bool use_mlock,
+ llama_progress_callback progress_callback,
+ void * progress_callback_user_data) {
+ model.t_start_us = ggml_time_us();
+
+ auto & ctx = model.ctx;
+ auto & hparams = model.hparams;
+
+ model.n_gpu_layers = n_gpu_layers;
+
+ size_t ctx_size;
+ size_t mmapped_size;
+
+ ml.calc_sizes(ctx_size, mmapped_size);
+
+ LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
+
+ // create the ggml context
+ {
+ model.buf.resize(ctx_size);
+ if (use_mlock) {
+ model.mlock_buf.init (model.buf.data);
+ model.mlock_buf.grow_to(model.buf.size);
+ }
+
+ struct ggml_init_params params = {
+ /*.mem_size =*/ model.buf.size,
+ /*.mem_buffer =*/ model.buf.data,
+ /*.no_alloc =*/ ml.use_mmap,
+ };
+
+ model.ctx = ggml_init(params);
+ if (!model.ctx) {
+ throw std::runtime_error(format("ggml_init() failed"));
+ }
+ }
+
+ (void) main_gpu;
+ (void) mul_mat_q;
+#if defined(GGML_USE_CUBLAS)
+ LLAMA_LOG_INFO("%s: using " GGML_CUDA_NAME " for GPU acceleration\n", __func__);
+ ggml_cuda_set_main_device(main_gpu);
+ ggml_cuda_set_mul_mat_q(mul_mat_q);
+#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
+#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
+#elif defined(GGML_USE_CLBLAST)
+ LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
+#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
+#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU
+#else
+#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU
+#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_CPU
+#endif
+
+ // prepare memory for the weights
+ size_t vram_weights = 0;
+ {
+ const int64_t n_embd = hparams.n_embd;
+ const int64_t n_embd_gqa = hparams.n_embd_gqa();
+ const int64_t n_layer = hparams.n_layer;
+ const int64_t n_vocab = hparams.n_vocab;
+
+ const auto tn = LLM_TN(model.arch);
+ switch (model.arch) {
+ case LLM_ARCH_LLAMA:
+ {
+ model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+
+ // output
+ {
+ ggml_backend backend_norm;
+ ggml_backend backend_output;
+
+ if (n_gpu_layers > int(n_layer)) {
+ // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
+ // on Windows however this is detrimental unless everything is on the GPU
+#ifndef _WIN32
+ backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#else
+ backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#endif // _WIN32
+
+ backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
+
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+
+ if (backend_norm == GGML_BACKEND_GPU) {
+ vram_weights += ggml_nbytes(model.output_norm);
+ }
+ if (backend_output == GGML_BACKEND_GPU_SPLIT) {
+ vram_weights += ggml_nbytes(model.output);
+ }
+ }
+
+ const uint32_t n_ff = hparams.n_ff;
+
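+                    // layers [i_gpu_start, n_layer) are offloaded; when n_gpu_layers exceeds
+                    // n_layer this is negative and every repeating layer goes to the GPU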
+ const int i_gpu_start = n_layer - n_gpu_layers;
+
+ model.layers.resize(n_layer);
+
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+ const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+
+ auto & layer = model.layers[i];
+
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+
+ layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
+ layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
+ layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+
+ layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
+
+ layer.w1 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+
+ if (backend == GGML_BACKEND_GPU) {
+ vram_weights +=
+ ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
+ ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
+ ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
+ }
+ }
+ } break;
+ case LLM_ARCH_BAICHUAN:
+ {
+ model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+ {
+ ggml_backend backend_norm;
+ ggml_backend backend_output;
+
+ if (n_gpu_layers > int(n_layer)) {
+ // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
+ // on Windows however this is detrimental unless everything is on the GPU
+#ifndef _WIN32
+ backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#else
+ backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#endif // _WIN32
+
+ backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
+
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+
+ if (backend_norm == GGML_BACKEND_GPU) {
+ vram_weights += ggml_nbytes(model.output_norm);
+ }
+ if (backend_output == GGML_BACKEND_GPU_SPLIT) {
+ vram_weights += ggml_nbytes(model.output);
+ }
+ }
+
+ const uint32_t n_ff = hparams.n_ff;
+
+ const int i_gpu_start = n_layer - n_gpu_layers;
+
+ model.layers.resize(n_layer);
+
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+ const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+
+ auto & layer = model.layers[i];
+
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+
+ layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
+ layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
+ layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+
+ layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
+
+ layer.w1 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+
+ if (backend == GGML_BACKEND_GPU) {
+ vram_weights +=
+ ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
+ ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
+ ggml_nbytes(layer.w1) + ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
+ }
+ }
+ } break;
+ case LLM_ARCH_FALCON:
+ {
+ // TODO: CPU-only for now
+
+ model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+
+ // output
+ {
+ ggml_backend backend_norm;
+ ggml_backend backend_output;
+
+ if (n_gpu_layers > int(n_layer)) {
+ // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
+ // on Windows however this is detrimental unless everything is on the GPU
+#ifndef _WIN32
+ backend_norm = low_vram ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#else
+ backend_norm = low_vram || n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
+#endif // _WIN32
+
+ backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
+
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+
+ if (backend_norm == GGML_BACKEND_GPU) {
+ vram_weights += ggml_nbytes(model.output_norm);
+ vram_weights += ggml_nbytes(model.output_norm_b);
+ }
+ if (backend_output == GGML_BACKEND_GPU_SPLIT) {
+ vram_weights += ggml_nbytes(model.output);
+ }
+ }
+
+ const uint32_t n_ff = hparams.n_ff;
+
+ const int i_gpu_start = n_layer - n_gpu_layers;
+
+ model.layers.resize(n_layer);
+
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
+ const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+
+ auto & layer = model.layers[i];
+
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+ layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
+
+ if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i).c_str()) >= 0) {
+ layer.attn_norm_2 = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, backend);
+ layer.attn_norm_2_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, backend);
+
+ if (backend == GGML_BACKEND_GPU) {
+ vram_weights += ggml_nbytes(layer.attn_norm_2);
+ vram_weights += ggml_nbytes(layer.attn_norm_2_b);
+ }
+ }
+
+ layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+
+ layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+
+ if (backend == GGML_BACKEND_GPU) {
+ vram_weights +=
+ ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) +
+ ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.wo) +
+ ggml_nbytes(layer.w2) + ggml_nbytes(layer.w3);
+ }
+ }
+ } break;
+ default:
+ throw std::runtime_error("unknown architecture");
+ };
+ }
+
+ ml.done_getting_tensors();
+
+ // print memory requirements
+ {
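+        // the KV cache size reported by kv_size() is assumed to be for f16 entries;
+        // an f32 cache needs twice as much memory, hence the scale factor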
+ const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1;
+
+ // this is the total memory required to run the inference
+ size_t mem_required =
+ ctx_size +
+ mmapped_size - vram_weights; // weights in VRAM not in memory
+
+ // this is the memory required by one llama_state
+ const size_t mem_required_state = scale*hparams.kv_size();
+
+ LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
+ mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
+
+ (void) n_batch;
+
+#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
+ const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
+
+ LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
+ if (n_gpu_layers > (int) hparams.n_layer) {
+ LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
}
- for (llama_load_tensor & lt : ml->tensors_map.tensors) {
- if (lt.ggml_tensor->backend != GGML_BACKEND_GPU) {
- continue;
+ size_t vram_kv_cache = 0;
+
+#ifdef GGML_USE_CUBLAS
+ const int max_backend_supported_layers = hparams.n_layer + 3;
+ const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
+ if (n_gpu_layers > (int) hparams.n_layer + 1) {
+ if (low_vram) {
+ LLAMA_LOG_INFO("%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
+ } else {
+ LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__);
+ vram_kv_cache += hparams.kv_size() / 2;
}
- if (progress_callback) {
- progress_callback((float) done_size / data_size, progress_callback_user_data);
+ }
+ if (n_gpu_layers > (int) hparams.n_layer + 2) {
+ if (low_vram) {
+ LLAMA_LOG_WARN("%s: cannot offload k cache to GPU due to low VRAM option\n", __func__);
+ } else {
+ LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__);
+ vram_kv_cache += hparams.kv_size() / 2;
}
- ggml_cuda_load_data(fname.c_str(), lt.ggml_tensor, lt.shards.at(0).file_off);
- done_size += lt.size;
}
- }
+#elif defined(GGML_USE_CLBLAST)
+ const int max_backend_supported_layers = hparams.n_layer + 1;
+ const int max_offloadable_layers = hparams.n_layer + 1;
#endif // GGML_USE_CUBLAS
+ LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n",
+ __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
+ LLAMA_LOG_INFO("%s: VRAM used: %zu MB\n",
+ __func__, (vram_weights + vram_kv_cache + MB - 1) / MB); // round up
+#else
+ (void) n_gpu_layers;
+#endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
+ }
+
+ // populate `tensors_by_name`
+ for (int i = 0; i < ml.n_tensors; ++i) {
+ struct ggml_tensor * cur = ggml_get_tensor(ctx, ml.get_tensor_name(i));
+ model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
+ }
+
+ (void) tensor_split;
+#if defined(GGML_USE_CUBLAS)
+ {
+ ggml_cuda_set_tensor_split(tensor_split);
+ }
+#endif
+
+ ml.load_all_data(ctx, progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL);
+
if (progress_callback) {
progress_callback(1.0f, progress_callback_user_data);
}
- model.mapping = std::move(ml->mapping);
+ model.mapping = std::move(ml.mapping);
     // loading time will be recalculated after the first eval, so
// we take page faults deferred by mmap() into consideration
- lctx.t_load_us = ggml_time_us() - lctx.t_start_us;
+ model.t_load_us = ggml_time_us() - model.t_start_us;
}
static bool llama_model_load(
const std::string & fname,
- llama_context & lctx,
+ llama_model & model,
int n_ctx,
+ int n_batch,
int n_gpu_layers,
+ int main_gpu,
+ const float * tensor_split,
+ const bool mul_mat_q,
+ float rope_freq_base,
+ float rope_freq_scale,
+ bool low_vram,
ggml_type memory_type,
bool use_mmap,
bool use_mlock,
llama_progress_callback progress_callback,
void *progress_callback_user_data) {
try {
- llama_model_load_internal(fname, lctx, n_ctx, n_gpu_layers, memory_type, use_mmap, use_mlock,
- vocab_only, progress_callback, progress_callback_user_data);
- return true;
- } catch (const std::string & err) {
- fprintf(stderr, "error loading model: %s\n", err.c_str());
+ std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap));
+
+ llm_load_arch (*ml, model);
+ llm_load_hparams(*ml, model, n_ctx, rope_freq_base, rope_freq_scale);
+ llm_load_vocab (*ml, model);
+
+ llm_load_print_meta(*ml, model);
+
+ if (model.hparams.n_vocab != model.vocab.id_to_token.size()) {
+ throw std::runtime_error("vocab size mismatch");
+ }
+
+ if (vocab_only) {
+ LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
+ return true;
+ }
+
+ llm_load_tensors(
+ *ml, model, n_batch, n_gpu_layers,
+ main_gpu, tensor_split, mul_mat_q, low_vram, memory_type,
+ use_mlock, progress_callback, progress_callback_user_data);
+ } catch (const std::exception & err) {
+ LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
return false;
}
+
+ return true;
}
-// evaluate the transformer
-//
-// - lctx: llama context
-// - tokens: new batch of tokens to process
-// - n_past: the context size so far
-// - n_threads: number of threads to use
-//
-static bool llama_eval_internal(
- llama_context & lctx,
- const llama_token * tokens,
- const int n_tokens,
- const int n_past,
- int n_threads) {
-
- // enforce that the first token is BOS
- if (n_past == 0 && tokens[0] != llama_token_bos()) {
- fprintf(stderr, "%s: first token must be BOS\n", __func__);
- return false;
+static struct ggml_cgraph * llm_build_llama(
+ llama_context & lctx,
+ const llama_token * tokens,
+ const float * embd,
+ int n_tokens,
+ int n_past) {
+
+ GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT
+
+ const int N = n_tokens;
+
+ const auto & model = lctx.model;
+ const auto & hparams = model.hparams;
+
+ const auto & kv_self = lctx.kv_self;
+
+ GGML_ASSERT(!!kv_self.ctx);
+
+ const int64_t n_embd = hparams.n_embd;
+ const int64_t n_layer = hparams.n_layer;
+ const int64_t n_ctx = hparams.n_ctx;
+ const int64_t n_head = hparams.n_head;
+ const int64_t n_head_kv = hparams.n_head_kv;
+ const int64_t n_embd_head = hparams.n_embd_head();
+ const int64_t n_embd_gqa = hparams.n_embd_gqa();
+
+ GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+ const float freq_base = hparams.rope_freq_base;
+ const float freq_scale = hparams.rope_freq_scale;
+ const float norm_rms_eps = hparams.f_norm_rms_eps;
+
+ const int n_gpu_layers = model.n_gpu_layers;
+
+ auto & buf_compute = lctx.buf_compute;
+
+ struct ggml_init_params params = {
+ /*.mem_size =*/ buf_compute.size,
+ /*.mem_buffer =*/ buf_compute.data,
+ /*.no_alloc =*/ false,
+ };
+
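+    // tensor data is assigned later by the graph allocator (lctx.alloc), so the context
+    // must not allocate it; buf_compute only needs to hold the tensor and graph metadata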
+ params.no_alloc = true;
+
+ struct ggml_context * ctx0 = ggml_init(params);
+
+ ggml_cgraph * gf = ggml_new_graph(ctx0);
+
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
+
+ if (tokens) {
+ struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+
+ ggml_allocr_alloc(lctx.alloc, inp_tokens);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
+ }
+ ggml_set_name(inp_tokens, "inp_tokens");
+
+ inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
+ } else {
+#ifdef GGML_USE_MPI
+ GGML_ASSERT(false && "not implemented");
+#endif
+
+ inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
+
+ ggml_allocr_alloc(lctx.alloc, inpL);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
+ }
}
- const int64_t t_start_us = ggml_time_us();
+ const int i_gpu_start = n_layer - n_gpu_layers;
+ (void) i_gpu_start;
+
+ // offload functions set the tensor output backend to GPU
+ // tensors are GPU-accelerated if any input or the output has been offloaded
+ //
+ // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal
+ // in that case ggml_cuda_assign_buffers has no effect
+ offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
+ offload_func_t offload_func_kq = llama_nop;
+ offload_func_t offload_func_v = llama_nop;
+
+#ifdef GGML_USE_CUBLAS
+ if (n_gpu_layers > n_layer) {
+ offload_func_nr = ggml_cuda_assign_buffers_no_alloc;
+ }
+ if (n_gpu_layers > n_layer + 1) {
+ offload_func_v = ggml_cuda_assign_buffers_no_alloc;
+ }
+ if (n_gpu_layers > n_layer + 2) {
+ offload_func_kq = ggml_cuda_assign_buffers_no_alloc;
+ }
+#endif // GGML_USE_CUBLAS
+
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ ggml_allocr_alloc(lctx.alloc, KQ_scale);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
+ }
+ ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
+
+ for (int il = 0; il < n_layer; ++il) {
+ ggml_format_name(inpL, "layer_inp_%d", il);
+
+ offload_func_t offload_func = llama_nop;
+
+#ifdef GGML_USE_CUBLAS
+ if (il >= i_gpu_start) {
+ offload_func = ggml_cuda_assign_buffers_no_alloc;
+ }
+#endif // GGML_USE_CUBLAS
+
+ struct ggml_tensor * inpSA = inpL;
+
+ // norm
+ {
+ cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps);
+ offload_func(cur);
+ ggml_set_name(cur, "rms_norm_0");
+
+ // cur = cur*attn_norm(broadcasted)
+ cur = ggml_mul(ctx0, cur, model.layers[il].attn_norm);
+ offload_func(cur);
+ ggml_set_name(cur, "attention_norm_0");
+ }
+
+ // self-attention
+ {
+ // compute Q and K and RoPE them
+ struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+ offload_func_kq(tmpk);
+ ggml_set_name(tmpk, "tmpk");
+
+ struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+ offload_func_kq(tmpq);
+ ggml_set_name(tmpq, "tmpq");
+
+ struct ggml_tensor * Kcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
+ offload_func_kq(Kcur);
+ ggml_set_name(Kcur, "Kcur");
+
+ struct ggml_tensor * Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
+ offload_func_kq(Qcur);
+ ggml_set_name(Qcur, "Qcur");
+
+ // store key and value to memory
+ {
+ // compute the transposed [N, n_embd] V matrix
+
+ struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+ offload_func_v(tmpv);
+ ggml_set_name(tmpv, "tmpv");
+
+ struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, N));
+ offload_func_v(Vcur);
+ ggml_set_name(Vcur, "Vcur");
+
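+                // the K cache stores the RoPE-ed keys row-major, appended at slot il*n_ctx + n_past;
+                // the V cache is laid out transposed so that, for each embedding channel, values of
+                // consecutive context positions are contiguous for the later V*softmax(KQ) matmul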
+ struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past));
+ offload_func_kq(k);
+ ggml_set_name(k, "k");
+
+ struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa,
+ ( n_ctx)*ggml_element_size(kv_self.v),
+ (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v));
+ offload_func_v(v);
+ ggml_set_name(v, "v");
+
+ // important: storing RoPE-ed version of K in the KV cache!
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
+ }
+
+ struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
+ offload_func_kq(Q);
+ ggml_set_name(Q, "Q");
+
+ struct ggml_tensor * K =
+ ggml_view_3d(ctx0, kv_self.k,
+ n_embd_head, n_past + N, n_head_kv,
+ ggml_element_size(kv_self.k)*n_embd_gqa,
+ ggml_element_size(kv_self.k)*n_embd_head,
+ ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il);
+ offload_func_kq(K);
+ ggml_set_name(K, "K");
+
+ // K * Q
+ struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
+ offload_func_kq(KQ);
+ ggml_set_name(KQ, "KQ");
+
+ // KQ_scaled = KQ / sqrt(n_embd_head)
+ // KQ_scaled shape [n_past + N, N, n_head, 1]
+ struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
+ offload_func_kq(KQ_scaled);
+ ggml_set_name(KQ_scaled, "KQ_scaled");
+
+ // KQ_masked = mask_past(KQ_scaled)
+ struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
+ offload_func_kq(KQ_masked);
+ ggml_set_name(KQ_masked, "KQ_masked");
+
+ // KQ = soft_max(KQ_masked)
+ struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
+ offload_func_v(KQ_soft_max);
+ ggml_set_name(KQ_soft_max, "KQ_soft_max");
+
+ // split cached V into n_head heads
+ struct ggml_tensor * V =
+ ggml_view_3d(ctx0, kv_self.v,
+ n_past + N, n_embd_head, n_head_kv,
+ ggml_element_size(kv_self.v)*n_ctx,
+ ggml_element_size(kv_self.v)*n_ctx*n_embd_head,
+ ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il);
+ offload_func_v(V);
+ ggml_set_name(V, "V");
+
+#if 1
+ struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
+ offload_func_v(KQV);
+ ggml_set_name(KQV, "KQV");
+#else
+ // make V contiguous in memory to speed up the matmul, however we waste time on the copy
+ // on M1 this is faster for the perplexity computation, but ~5% slower for the single-token generation
+ // is there a better way?
+ struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd_head, n_head));
+ struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max);
+#endif
+
+ // KQV_merged = KQV.permute(0, 2, 1, 3)
+ struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
+ offload_func_v(KQV_merged);
+ ggml_set_name(KQV_merged, "KQV_merged");
+
+ // cur = KQV_merged.contiguous().view(n_embd, N)
+ cur = ggml_cpy(ctx0,
+ KQV_merged,
+ ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
+ offload_func_v(cur);
+ ggml_set_name(cur, "KQV_merged_contiguous");
+
+ // projection (no bias)
+ cur = ggml_mul_mat(ctx0,
+ model.layers[il].wo,
+ cur);
+ offload_func(cur);
+ ggml_set_name(cur, "result_wo");
+ }
+
+ struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
+ offload_func(inpFF);
+ ggml_set_name(inpFF, "inpFF");
+
+ // feed-forward network
+ {
+ // norm
+ {
+ cur = ggml_rms_norm(ctx0, inpFF, norm_rms_eps);
+ offload_func(cur);
+ ggml_set_name(cur, "rms_norm_1");
+
+ // cur = cur*ffn_norm(broadcasted)
+ cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm);
+ offload_func(cur);
+ ggml_set_name(cur, "ffn_norm");
+ }
+
+ struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
+ model.layers[il].w3,
+ cur);
+ offload_func(tmp);
+ ggml_set_name(tmp, "result_w3");
+
+ cur = ggml_mul_mat(ctx0,
+ model.layers[il].w1,
+ cur);
+ offload_func(cur);
+ ggml_set_name(cur, "result_w1");
+
+ // SILU activation
+ cur = ggml_silu(ctx0, cur);
+ offload_func(cur);
+ ggml_set_name(cur, "silu");
+
+ cur = ggml_mul(ctx0, cur, tmp);
+ offload_func(cur);
+ ggml_set_name(cur, "silu_x_result_w3");
+
+ cur = ggml_mul_mat(ctx0,
+ model.layers[il].w2,
+ cur);
+ offload_func(cur);
+ ggml_set_name(cur, "result_w2");
+ }
+
+ cur = ggml_add(ctx0, cur, inpFF);
+ offload_func(cur);
+ ggml_set_name(cur, "inpFF_+_result_w2");
+
+ // input for next layer
+ inpL = cur;
+ }
+
+ cur = inpL;
+
+ // norm
+ {
+ cur = ggml_rms_norm(ctx0, cur, norm_rms_eps);
+ offload_func_nr(cur);
+ ggml_set_name(cur, "rms_norm_2");
+
+ // cur = cur*norm(broadcasted)
+ cur = ggml_mul(ctx0, cur, model.output_norm);
+ // offload_func_nr(cur); // TODO CPU + GPU mirrored backend
+ ggml_set_name(cur, "result_norm");
+ }
+
+ // lm_head
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ ggml_set_name(cur, "result_output");
+
+ ggml_build_forward_expand(gf, cur);
+
+ ggml_free(ctx0);
+
+ return gf;
+}
+
+
+static struct ggml_cgraph * llm_build_baichaun(
+ llama_context & lctx,
+ const llama_token * tokens,
+ const float * embd,
+ int n_tokens,
+ int n_past) {
+
+ GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT
const int N = n_tokens;
const auto & model = lctx.model;
const auto & hparams = model.hparams;
- const auto & kv_self = model.kv_self;
+ const auto & kv_self = lctx.kv_self;
+
+ GGML_ASSERT(!!kv_self.ctx);
- LLAMA_ASSERT(!!kv_self.ctx);
+ const int64_t n_embd = hparams.n_embd;
+ const int64_t n_layer = hparams.n_layer;
+ const int64_t n_ctx = hparams.n_ctx;
+ const int64_t n_head = hparams.n_head;
+ const int64_t n_head_kv = hparams.n_head_kv;
+ const int64_t n_embd_head = hparams.n_embd_head();
+ const int64_t n_embd_gqa = hparams.n_embd_gqa();
- const int n_embd = hparams.n_embd;
- const int n_layer = hparams.n_layer;
- const int n_ctx = hparams.n_ctx;
- const int n_head = hparams.n_head;
- const int n_vocab = hparams.n_vocab;
- const int n_rot = hparams.n_embd/hparams.n_head;
+ GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+ const float freq_base = hparams.rope_freq_base;
+ const float freq_scale = hparams.rope_freq_scale;
+ const float norm_rms_eps = hparams.f_norm_rms_eps;
- const float eps = 5e-6f; // TODO: take from hparams
+ const int n_gpu_layers = model.n_gpu_layers;
- auto & mem_per_token = lctx.mem_per_token;
- auto & buf_compute = lctx.buf_compute;
+ auto & buf_compute = lctx.buf_compute;
struct ggml_init_params params = {
/*.mem_size =*/ buf_compute.size,
- /*.mem_buffer =*/ buf_compute.addr,
+ /*.mem_buffer =*/ buf_compute.data,
/*.no_alloc =*/ false,
};
+ params.no_alloc = true;
+
struct ggml_context * ctx0 = ggml_init(params);
- // for big prompts, if BLAS is enabled, it is better to use only one thread
- // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
- ggml_cgraph gf = {};
- n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
+ ggml_cgraph * gf = ggml_new_graph(ctx0);
+
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
+
+ if (tokens) {
+ struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+
+ ggml_allocr_alloc(lctx.alloc, inp_tokens);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
+ }
+ ggml_set_name(inp_tokens, "inp_tokens");
+
+ inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
+ } else {
+#ifdef GGML_USE_MPI
+ GGML_ASSERT(false && "not implemented");
+#endif
+
+ inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
+
+ ggml_allocr_alloc(lctx.alloc, inpL);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
+ }
+ }
+
+ const int i_gpu_start = n_layer - n_gpu_layers;
+ (void) i_gpu_start;
- struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
- ggml_set_name(embd, "embd");
- memcpy(embd->data, tokens, N*ggml_element_size(embd));
+ // offload functions set the tensor output backend to GPU
+ // tensors are GPU-accelerated if any input or the output has been offloaded
+ //
+ // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal
+ // in that case ggml_cuda_assign_buffers has no effect
+ offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
+ offload_func_t offload_func_kq = llama_nop;
+ offload_func_t offload_func_v = llama_nop;
- struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embeddings, embd);
+#ifdef GGML_USE_CUBLAS
+ if (n_gpu_layers > n_layer) {
+ offload_func_nr = ggml_cuda_assign_buffers_no_alloc;
+ }
+ if (n_gpu_layers > n_layer + 1) {
+ offload_func_v = ggml_cuda_assign_buffers_no_alloc;
+ }
+ if (n_gpu_layers > n_layer + 2) {
+ offload_func_kq = ggml_cuda_assign_buffers_no_alloc;
+ }
+#endif // GGML_USE_CUBLAS
+
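+ // attention scale 1/sqrt(n_embd_head) (same as 1/sqrt(n_embd/n_head)), kept in a 1-element tensor so ggml-alloc can manage it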
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ ggml_allocr_alloc(lctx.alloc, KQ_scale);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
+ }
+ ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
for (int il = 0; il < n_layer; ++il) {
- struct ggml_tensor * inpSA = inpL;
+ ggml_format_name(inpL, "layer_inp_%d", il);
- struct ggml_tensor * cur;
+ offload_func_t offload_func = llama_nop;
- lctx.use_buf(ctx0, 0);
+#ifdef GGML_USE_CUBLAS
+ if (il >= i_gpu_start) {
+ offload_func = ggml_cuda_assign_buffers_no_alloc;
+ }
+#endif // GGML_USE_CUBLAS
+
+ struct ggml_tensor * inpSA = inpL;
// norm
{
- cur = ggml_rms_norm(ctx0, inpL, eps);
-
- // cur = cur*attention_norm(broadcasted)
- cur = ggml_mul(ctx0, cur, model.layers[il].attention_norm);
+ cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps);
+ offload_func(cur);
+ ggml_set_name(cur, "rms_norm_0");
+
+ // cur = cur*attn_norm(broadcasted)
+ cur = ggml_mul(ctx0, cur, model.layers[il].attn_norm);
+ offload_func(cur);
+ ggml_set_name(cur, "attention_norm_0");
}
// self-attention
{
// compute Q and K and RoPE them
- struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
- struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0, 0);
- ggml_set_name(Qcur, "Qcur");
+ struct ggml_tensor * tmpk = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+ offload_func_kq(tmpk);
+ ggml_set_name(tmpk, "tmpk");
+
+ struct ggml_tensor * tmpq = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+ offload_func_kq(tmpq);
+ ggml_set_name(tmpq, "tmpq");
+
+ struct ggml_tensor * Kcur;
+ struct ggml_tensor * Qcur;
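+ // 7B applies rotary position embeddings to Q/K; 13B leaves Q/K as-is and relies on the ALiBi bias added to the attention scores below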
+ switch (model.type) {
+ case MODEL_7B:
+ Kcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpk, n_embd_head, n_head_kv, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
+ Qcur = ggml_rope_custom_inplace(ctx0, ggml_reshape_3d(ctx0, tmpq, n_embd_head, n_head, N), n_past, n_embd_head, 0, 0, freq_base, freq_scale);
+ break;
+ case MODEL_13B:
+ Kcur = ggml_reshape_3d(ctx0, tmpk, n_embd/n_head, n_head, N);
+ Qcur = ggml_reshape_3d(ctx0, tmpq, n_embd/n_head, n_head, N);
+ break;
+ default:
+ GGML_ASSERT(false);
+ }
+
+ offload_func_kq(Kcur);
ggml_set_name(Kcur, "Kcur");
+ offload_func_kq(Qcur);
+ ggml_set_name(Qcur, "Qcur");
+
// store key and value to memory
{
// compute the transposed [N, n_embd] V matrix
- struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, cur), n_embd, N));
- struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd, (ggml_element_size(kv_self.k)*n_embd)*(il*n_ctx + n_past));
- struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd,
+ struct ggml_tensor * tmpv = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+ offload_func_v(tmpv);
+ ggml_set_name(tmpv, "tmpv");
+
+ struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, N));
+ offload_func_v(Vcur);
+ ggml_set_name(Vcur, "Vcur");
+
+ struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past));
+ offload_func_kq(k);
+ ggml_set_name(k, "k");
+
+ struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa,
( n_ctx)*ggml_element_size(kv_self.v),
- (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd + n_past*ggml_element_size(kv_self.v));
+ (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v));
+ offload_func_v(v);
+ ggml_set_name(v, "v");
// important: storing RoPE-ed version of K in the KV cache!
- ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
- ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
}
- struct ggml_tensor * Q =
- ggml_permute(ctx0,
- Qcur,
- 0, 2, 1, 3);
+ struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
+ offload_func_kq(Q);
ggml_set_name(Q, "Q");
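+ // view the K cache of this layer as [n_embd_head, n_past + N, n_head_kv]; each cached position is a row of n_embd_gqa values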
struct ggml_tensor * K =
- ggml_permute(ctx0,
- ggml_reshape_3d(ctx0,
- ggml_view_1d(ctx0, kv_self.k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(kv_self.k)*n_embd),
- n_embd/n_head, n_head, n_past + N),
- 0, 2, 1, 3);
+ ggml_view_3d(ctx0, kv_self.k,
+ n_embd_head, n_past + N, n_head_kv,
+ ggml_element_size(kv_self.k)*n_embd_gqa,
+ ggml_element_size(kv_self.k)*n_embd_head,
+ ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il);
+ offload_func_kq(K);
ggml_set_name(K, "K");
// K * Q
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
+ offload_func_kq(KQ);
ggml_set_name(KQ, "KQ");
- // KQ_scaled = KQ / sqrt(n_embd/n_head)
- struct ggml_tensor * KQ_scale = ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head));
- ggml_set_name(KQ_scale, "1/sqrt(n_embd/n_head)");
-
+ // KQ_scaled = KQ / sqrt(n_embd_head)
// KQ_scaled shape [n_past + N, N, n_head, 1]
struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
+ offload_func_kq(KQ_scaled);
ggml_set_name(KQ_scaled, "KQ_scaled");
+ struct ggml_tensor * KQ_masked;
+ struct ggml_tensor * KQ_scaled_alibi;
+
+ switch (model.type) {
+ case MODEL_7B:
+ KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
+ break;
+ case MODEL_13B:
+ KQ_scaled_alibi = ggml_alibi(ctx0, KQ_scaled, n_past, n_head, 8);
+ ggml_set_name(KQ_scaled_alibi, "KQ_scaled_alibi");
+ KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled_alibi, n_past);
+ break;
+ default:
+ GGML_ASSERT(false);
+ }
// KQ_masked = mask_past(KQ_scaled)
- struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
- ggml_set_name(KQ_masked, "KQ_masked");
+ // struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
+ // struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled_alibi, n_past);
+ // offload_func_kq(KQ_masked);
+ // ggml_set_name(KQ_masked, "KQ_masked");
// KQ = soft_max(KQ_masked)
struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
+ offload_func_v(KQ_soft_max);
ggml_set_name(KQ_soft_max, "KQ_soft_max");
-
// split cached V into n_head heads
struct ggml_tensor * V =
ggml_view_3d(ctx0, kv_self.v,
- n_past + N, n_embd/n_head, n_head,
- n_ctx*ggml_element_size(kv_self.v),
- n_ctx*ggml_element_size(kv_self.v)*n_embd/n_head,
- il*n_ctx*ggml_element_size(kv_self.v)*n_embd);
+ n_past + N, n_embd_head, n_head_kv,
+ ggml_element_size(kv_self.v)*n_ctx,
+ ggml_element_size(kv_self.v)*n_ctx*n_embd_head,
+ ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il);
+ offload_func_v(V);
ggml_set_name(V, "V");
#if 1
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
+ offload_func_v(KQV);
ggml_set_name(KQV, "KQV");
#else
// make V contiguous in memory to speed up the matmul, however we waste time on the copy
// on M1 this is faster for the perplexity computation, but ~5% slower for the single-token generation
// is there a better way?
- struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd/n_head, n_head));
+ struct ggml_tensor * V_cont = ggml_cpy(ctx0, V, ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd_head, n_head));
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_cont, KQ_soft_max);
#endif
// KQV_merged = KQV.permute(0, 2, 1, 3)
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
+ offload_func_v(KQV_merged);
ggml_set_name(KQV_merged, "KQV_merged");
// cur = KQV_merged.contiguous().view(n_embd, N)
cur = ggml_cpy(ctx0,
KQV_merged,
ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
+ offload_func_v(cur);
ggml_set_name(cur, "KQV_merged_contiguous");
// projection (no bias)
cur = ggml_mul_mat(ctx0,
model.layers[il].wo,
cur);
+ offload_func(cur);
+ ggml_set_name(cur, "result_wo");
}
- lctx.use_buf(ctx0, 1);
-
struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpSA);
+ offload_func(inpFF);
+ ggml_set_name(inpFF, "inpFF");
// feed-forward network
{
// norm
{
- cur = ggml_rms_norm(ctx0, inpFF, eps);
+ cur = ggml_rms_norm(ctx0, inpFF, norm_rms_eps);
+ offload_func(cur);
+ ggml_set_name(cur, "rms_norm_1");
// cur = cur*ffn_norm(broadcasted)
cur = ggml_mul(ctx0, cur, model.layers[il].ffn_norm);
+ offload_func(cur);
+ ggml_set_name(cur, "ffn_norm");
}
struct ggml_tensor * tmp = ggml_mul_mat(ctx0,
model.layers[il].w3,
cur);
+ offload_func(tmp);
+ ggml_set_name(tmp, "result_w3");
cur = ggml_mul_mat(ctx0,
model.layers[il].w1,
cur);
+ offload_func(cur);
+ ggml_set_name(cur, "result_w1");
// SILU activation
cur = ggml_silu(ctx0, cur);
+ offload_func(cur);
+ ggml_set_name(cur, "silu");
cur = ggml_mul(ctx0, cur, tmp);
+ offload_func(cur);
+ ggml_set_name(cur, "silu_x_result_w3");
cur = ggml_mul_mat(ctx0,
model.layers[il].w2,
cur);
+ offload_func(cur);
+ ggml_set_name(cur, "result_w2");
}
cur = ggml_add(ctx0, cur, inpFF);
+ offload_func(cur);
+ ggml_set_name(cur, "inpFF_+_result_w2");
// input for next layer
inpL = cur;
}
- lctx.use_buf(ctx0, 0);
+ cur = inpL;
+
+ // norm
+ {
+ cur = ggml_rms_norm(ctx0, cur, norm_rms_eps);
+ offload_func_nr(cur);
+ ggml_set_name(cur, "rms_norm_2");
+
+ // cur = cur*norm(broadcasted)
+ cur = ggml_mul(ctx0, cur, model.output_norm);
+ // offload_func_nr(cur); // TODO CPU + GPU mirrored backend
+ ggml_set_name(cur, "result_norm");
+ }
+
+ // lm_head
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ ggml_set_name(cur, "result_output");
+
+ ggml_build_forward_expand(gf, cur);
+
+ ggml_free(ctx0);
+
+ return gf;
+}
+
+static struct ggml_cgraph * llm_build_falcon(
+ llama_context & lctx,
+ const llama_token * tokens,
+ const float * embd,
+ int n_tokens,
+ int n_past) {
+
+ GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT
+
+ const int N = n_tokens;
+
+ const auto & model = lctx.model;
+ const auto & hparams = model.hparams;
+
+ const auto & kv_self = lctx.kv_self;
+
+ GGML_ASSERT(!!kv_self.ctx);
+
+ const int64_t n_embd = hparams.n_embd;
+ const int64_t n_layer = hparams.n_layer;
+ const int64_t n_ctx = hparams.n_ctx;
+ const int64_t n_head = hparams.n_head;
+ const int64_t n_head_kv = hparams.n_head_kv;
+ const int64_t n_embd_head = hparams.n_embd_head();
+ const int64_t n_embd_gqa = hparams.n_embd_gqa();
+
+ GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+ const float freq_base = hparams.rope_freq_base;
+ const float freq_scale = hparams.rope_freq_scale;
+ const float norm_eps = hparams.f_norm_eps;
+
+ const int n_gpu_layers = model.n_gpu_layers;
+
+ auto & buf_compute = lctx.buf_compute;
+
+ struct ggml_init_params params = {
+ /*.mem_size =*/ buf_compute.size,
+ /*.mem_buffer =*/ buf_compute.data,
+ /*.no_alloc =*/ false,
+ };
+
+ params.no_alloc = true;
+
+ struct ggml_context * ctx0 = ggml_init(params);
+
+ ggml_cgraph * gf = ggml_new_graph(ctx0);
+
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
+
+ if (tokens) {
+ struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+
+ ggml_allocr_alloc(lctx.alloc, inp_tokens);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens));
+ }
+ ggml_set_name(inp_tokens, "inp_tokens");
+
+ inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
+ } else {
+#ifdef GGML_USE_MPI
+ GGML_ASSERT(false && "not implemented");
+#endif
+
+ inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N);
+
+ ggml_allocr_alloc(lctx.alloc, inpL);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL));
+ }
+ }
+
+ const int i_gpu_start = n_layer - n_gpu_layers;
+ (void) i_gpu_start;
+
+ // offload functions set the tensor output backend to GPU
+ // tensors are GPU-accelerated if any input or the output has been offloaded
+ //
+ // with the low VRAM option VRAM scratch is disabled in llama_load_model_internal
+ // in that case ggml_cuda_assign_buffers has no effect
+ offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
+ offload_func_t offload_func_kq = llama_nop;
+ offload_func_t offload_func_v = llama_nop;
+
+#ifdef GGML_USE_CUBLAS
+ if (n_gpu_layers > n_layer) {
+ offload_func_nr = ggml_cuda_assign_buffers_no_alloc;
+ }
+ if (n_gpu_layers > n_layer + 1) {
+ offload_func_v = ggml_cuda_assign_buffers_no_alloc;
+ }
+ if (n_gpu_layers > n_layer + 2) {
+ offload_func_kq = ggml_cuda_assign_buffers_no_alloc;
+ }
+#endif // GGML_USE_CUBLAS
+
+ struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+ ggml_allocr_alloc(lctx.alloc, KQ_scale);
+ if (!ggml_allocr_is_measure(lctx.alloc)) {
+ ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head));
+ }
+ ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
+
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * attn_norm;
+
+ offload_func_t offload_func = llama_nop;
+
+#ifdef GGML_USE_CUBLAS
+ if (il >= i_gpu_start) {
+ offload_func = ggml_cuda_assign_buffers_no_alloc;
+ }
+#endif // GGML_USE_CUBLAS
+
+ // self-attention
+ // TODO: refactor into common function (shared with LLaMA)
+ {
+ attn_norm = ggml_norm(ctx0, inpL, norm_eps);
+ offload_func(attn_norm);
+
+ attn_norm = ggml_add(ctx0,
+ ggml_mul(ctx0, attn_norm, model.layers[il].attn_norm),
+ model.layers[il].attn_norm_b);
+ offload_func(attn_norm->src[0]);
+ offload_func(attn_norm);
+
+ if (model.layers[il].attn_norm_2) { // Falcon-40B
+ cur = ggml_norm(ctx0, inpL, norm_eps);
+ offload_func(cur);
+
+ cur = ggml_add(ctx0,
+ ggml_mul(ctx0, cur, model.layers[il].attn_norm_2),
+ model.layers[il].attn_norm_2_b);
+ offload_func(cur->src[0]);
+ offload_func(cur);
+ } else { // Falcon 7B
+ cur = attn_norm;
+ }
+
+ // compute QKV
+
+ cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
+ offload_func_kq(cur);
+
+ // Note that the strides for Kcur and Vcur are set up so that the
+ // resulting views are misaligned with the tensor's storage:
+ // applying the K/V offsets shifts the views so that they nominally
+ // extend past the end of the fused QKV tensor's allocated memory.
+ // This is fine because that out-of-range memory is never actually
+ // accessed, but it does require some care when trying to accurately
+ // dump these views for debugging.
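+ //
+ // per-token layout of the fused QKV result (row stride = (n_head + 2*n_head_kv)*n_embd_head elements):
+ //   [ Q: n_head*n_embd_head | K: n_head_kv*n_embd_head | V: n_head_kv*n_embd_head ]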
+
+ const size_t wsize = ggml_type_size(cur->type);
+
+ // TODO: these 2 ggml_conts are technically not needed, but we add them until CUDA support for
+ // non-contiguous views is added for the rope operator
+ struct ggml_tensor * tmpq = ggml_cont(ctx0, ggml_view_3d(
+ ctx0, cur, n_embd_head, n_head, N,
+ wsize * n_embd_head,
+ wsize * n_embd_head * (n_head + 2 * n_head_kv),
+ 0));
+ offload_func_kq(tmpq);
+
+ struct ggml_tensor * tmpk = ggml_cont(ctx0, ggml_view_3d(
+ ctx0, cur, n_embd_head, n_head_kv, N,
+ wsize * n_embd_head,
+ wsize * n_embd_head * (n_head + 2 * n_head_kv),
+ wsize * n_embd_head * n_head));
+ offload_func_kq(tmpk);
+
+ struct ggml_tensor * tmpv = ggml_view_3d(
+ ctx0, cur, n_embd_head, n_head_kv, N,
+ wsize * n_embd_head,
+ wsize * n_embd_head * (n_head + 2 * n_head_kv),
+ wsize * n_embd_head * (n_head + n_head_kv));
+ offload_func_v(tmpv);
+
+ // using mode = 2 for neox mode
+ struct ggml_tensor * Qcur = ggml_rope_custom_inplace(ctx0, tmpq, n_past, n_embd_head, 2, 0, freq_base, freq_scale);
+ offload_func_kq(Qcur);
+ struct ggml_tensor * Kcur = ggml_rope_custom_inplace(ctx0, tmpk, n_past, n_embd_head, 2, 0, freq_base, freq_scale);
+ offload_func_kq(Kcur);
+
+ {
+ struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, ggml_cont(ctx0, tmpv), n_embd_gqa, N));
+ offload_func_v(Vcur);
+ offload_func_v(Vcur->src[0]->src[0]);
+ ggml_set_name(Vcur, "Vcur");
+
+ struct ggml_tensor * k = ggml_view_1d(ctx0, kv_self.k, N*n_embd_gqa, (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + n_past));
+ offload_func_kq(k);
+ ggml_set_name(k, "k");
+
+ struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, N, n_embd_gqa,
+ ( n_ctx)*ggml_element_size(kv_self.v),
+ (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + n_past*ggml_element_size(kv_self.v));
+ offload_func_v(v);
+
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
+ ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
+ }
+
+ struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3);
+ offload_func_kq(Q);
+ ggml_set_name(Q, "Q");
+
+ struct ggml_tensor * K =
+ ggml_view_3d(ctx0, kv_self.k,
+ n_embd_head, n_past + N, n_head_kv,
+ ggml_element_size(kv_self.k)*n_embd_gqa,
+ ggml_element_size(kv_self.k)*n_embd_head,
+ ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il);
+ offload_func_kq(K);
+ ggml_set_name(K, "K");
+
+ struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
+ offload_func_kq(KQ);
+ ggml_set_name(KQ, "KQ");
+
+ struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale);
+ offload_func_kq(KQ_scaled);
+ ggml_set_name(KQ_scaled, "KQ_scaled");
+
+ struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
+ offload_func_kq(KQ_masked);
+ ggml_set_name(KQ_masked, "KQ_masked");
+
+ struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
+ offload_func_v(KQ_soft_max);
+ ggml_set_name(KQ_soft_max, "KQ_soft_max");
+
+ struct ggml_tensor * V =
+ ggml_view_3d(ctx0, kv_self.v,
+ n_past + N, n_embd_head, n_head_kv,
+ ggml_element_size(kv_self.v)*n_ctx,
+ ggml_element_size(kv_self.v)*n_ctx*n_embd_head,
+ ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il);
+ offload_func_v(V);
+ ggml_set_name(V, "V");
+
+ struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
+ offload_func_v(KQV);
+ ggml_set_name(KQV, "KQV");
+
+ struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
+ offload_func_v(KQV_merged);
+ ggml_set_name(KQV_merged, "KQV_merged");
+
+ cur = ggml_cpy(ctx0, KQV_merged, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
+ offload_func_v(cur);
+ ggml_set_name(cur, "KQV_merged_contiguous");
+
+ cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur);
+ offload_func(cur);
+ ggml_set_name(cur, "result_wo");
+ }
+
+ struct ggml_tensor * attn_out = cur;
+
+ // feed forward
+ {
+ struct ggml_tensor * inpFF = attn_norm;
+
+ cur = ggml_mul_mat(ctx0, model.layers[il].w3, inpFF);
+ offload_func(cur);
+
+ cur = ggml_gelu(ctx0, cur);
+ offload_func(cur);
+ cur = ggml_mul_mat(ctx0, model.layers[il].w2, cur);
+ offload_func(cur);
+ }
+
+ cur = ggml_add(ctx0, cur, attn_out);
+ offload_func(cur);
+ cur = ggml_add(ctx0, cur, inpL);
+ offload_func(cur);
+
+ // input for next layer
+ inpL = cur;
+ }
- // used at the end to optionally extract the embeddings
- struct ggml_tensor * embeddings = NULL;
+ cur = inpL;
// norm
{
+ cur = ggml_norm(ctx0, cur, norm_eps);
+ offload_func_nr(cur);
+
+ cur = ggml_add(ctx0,
+ ggml_mul(ctx0, cur, model.output_norm),
+ model.output_norm_b);
+ ggml_set_name(cur, "result_norm");
+ }
+
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ ggml_set_name(cur, "result_output");
+
+ ggml_build_forward_expand(gf, cur);
+
+ ggml_free(ctx0);
+
+ return gf;
+}
+
+static struct ggml_cgraph * llama_build_graph(
+ llama_context & lctx,
+ const llama_token * tokens,
+ const float * embd,
+ int n_tokens,
+ int n_past) {
+ const auto & model = lctx.model;
+
+ struct ggml_cgraph * result = NULL;
+
+ switch (model.arch) {
+ case LLM_ARCH_LLAMA:
+ {
+ result = llm_build_llama(lctx, tokens, embd, n_tokens, n_past);
+ } break;
+ case LLM_ARCH_BAICHUAN:
+ {
+ result = llm_build_baichaun(lctx, tokens, embd, n_tokens, n_past);
+ } break;
+ case LLM_ARCH_FALCON:
+ {
+ result = llm_build_falcon(lctx, tokens, embd, n_tokens, n_past);
+ } break;
+ default:
+ GGML_ASSERT(false);
+ };
+
+ return result;
+}
+
+// evaluate the transformer
+//
+// - lctx: llama context
+// - tokens: new batch of tokens to process
+// - embd: embeddings input
+// - n_tokens: number of tokens
+// - n_past: the context size so far
+// - n_threads: number of threads to use
+//
+static bool llama_eval_internal(
+ llama_context & lctx,
+ const llama_token * tokens,
+ const float * embd,
+ int n_tokens,
+ int n_past,
+ int n_threads,
+ const char * cgraph_fname) {
+
+ GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT
+
+ GGML_ASSERT(n_tokens > 0);
+ GGML_ASSERT(n_past >= 0);
+ // TODO: keep the values of n_batch and n_ctx
+ // GGML_ASSERT(n_tokens <= n_batch);
+ // GGML_ASSERT(n_past + n_tokens <= n_ctx);
- inpL = ggml_rms_norm(ctx0, inpL, eps);
+ const int64_t t_start_us = ggml_time_us();
+
+#ifdef GGML_USE_MPI
+ ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
+#endif
+
+ GGML_ASSERT(n_threads > 0);
+
+ const int N = n_tokens;
+
+ const auto & model = lctx.model;
+ const auto & hparams = model.hparams;
+
+ const auto & kv_self = lctx.kv_self;
+
+ GGML_ASSERT(!!kv_self.ctx);
+
+ const int64_t n_embd = hparams.n_embd;
+ const int64_t n_vocab = hparams.n_vocab;
+
+ ggml_allocr_reset(lctx.alloc);
+
+ ggml_cgraph * gf = llama_build_graph(lctx, tokens, embd, n_tokens, n_past);
+
+ ggml_allocr_alloc_graph(lctx.alloc, gf);
+
+#ifdef GGML_USE_CUBLAS
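+ // after allocation, propagate each offloaded tensor's offset within the compute buffer to the CUDA scratch allocator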
+ for (int i = 0; i < gf->n_leafs; i++) {
+ ggml_tensor * node = gf->leafs[i];
+ if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
+ ggml_cuda_assign_scratch_offset(node, (char*)node->data - (char *) lctx.buf_alloc.data);
+ }
+ }
+
+ for (int i = 0; i < gf->n_nodes; i++) {
+ ggml_tensor * node = gf->nodes[i];
+ if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
+ ggml_cuda_assign_scratch_offset(node, (char*)node->data - (char *) lctx.buf_alloc.data);
+ }
+ }
+#endif
- // inpL = inpL*norm(broadcasted)
- inpL = ggml_mul(ctx0, inpL, model.norm);
+ // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
- embeddings = inpL;
+ // for big prompts, if BLAS is enabled, it is better to use only one thread
+ // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
+ // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
+ // we still need some threads to process all non-mul_mat ops, but not too much to avoid interfering
+ // with the BLAS calls. need a better solution
+ if (N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
+ n_threads = std::min(4, n_threads);
}
- // lm_head
- inpL = ggml_mul_mat(ctx0, model.output, inpL);
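+ // the graph builders name the last two nodes: "result_output" holds the logits, "result_norm" the final hidden states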
+ struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
+ struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
+
+ GGML_ASSERT(strcmp(res->name, "result_output") == 0);
+ GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
+
+#ifdef GGML_USE_MPI
+ const int64_t n_layer = hparams.n_layer;
+ ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
+#endif
+
+#ifdef GGML_USE_METAL
+ if (lctx.ctx_metal) {
+ ggml_metal_set_n_cb (lctx.ctx_metal, n_threads);
+ ggml_metal_graph_compute(lctx.ctx_metal, gf);
+ } else {
+ ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
+ }
+#else
+ ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
+#endif
- lctx.use_buf(ctx0, -1);
+#ifdef GGML_USE_MPI
+ ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer);
+#endif
- // logits -> probs
- //inpL = ggml_soft_max_inplace(ctx0, inpL);
+ // update kv token count
+ lctx.kv_self.n = n_past + N;
- // run the computation
- ggml_build_forward_expand (&gf, inpL);
- ggml_graph_compute_with_ctx(ctx0, &gf, n_threads);
+ if (cgraph_fname) {
+ ggml_graph_export(gf, cgraph_fname);
+ }
#ifdef GGML_PERF
// print timing information per ggml operation (for debugging purposes)
// requires GGML_PERF to be defined
- ggml_graph_print(&gf);
+ ggml_graph_print(gf);
#endif
// plot the computation graph in dot format (for debugging purposes)
//if (n_past%100 == 0) {
- // ggml_graph_dump_dot(&gf, NULL, "llama.dot");
+ // ggml_graph_dump_dot(gf, NULL, "llama.dot");
//}
- //embd_w.resize(n_vocab*N);
- //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
-
- // update kv token count
- lctx.model.kv_self.n = n_past + N;
-
// extract logits
{
auto & logits_out = lctx.logits;
if (lctx.logits_all) {
logits_out.resize(n_vocab * N);
- memcpy(logits_out.data(), (float *) ggml_get_data(inpL), sizeof(float)*n_vocab*N);
+ memcpy(logits_out.data(), (float *) ggml_get_data(res), sizeof(float)*n_vocab*N);
} else {
// return result for just the last token
logits_out.resize(n_vocab);
- memcpy(logits_out.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+ memcpy(logits_out.data(), (float *) ggml_get_data(res) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
}
}
memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd);
}
- if (mem_per_token == 0) {
- mem_per_token = ggml_used_mem(ctx0)/N;
- }
-
-#if 0
- printf("\n%s: used_mem = %.3f MB, scratch -- %.3f MB %.3f MB\n", __func__,
- ggml_used_mem(ctx0)/1024.0/1024.0,
- lctx.get_buf_max_mem(0)/1024.0/1024.0,
- lctx.get_buf_max_mem(1)/1024.0/1024.0);
-#endif
-
- ggml_free(ctx0);
-
// measure the performance only for the single-token evals
if (N == 1) {
lctx.t_eval_us += ggml_time_us() - t_start_us;
lctx.n_p_eval += N;
}
- return true;
+ return true;
+}
+
+//
+// tokenizer
+//
+
+static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
+ return vocab.type;
+}
+
+static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
+ return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL;
+}
+
+static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
+ return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_UNKNOWN;
+}
+
+static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
+ return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_CONTROL;
+}
+
+static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
+ return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_BYTE;
+}
+
+static uint8_t llama_token_to_byte(const llama_vocab & vocab, llama_token id) {
+ GGML_ASSERT(llama_is_byte_token(vocab, id));
+ const auto& token_data = vocab.id_to_token.at(id);
+ auto buf = token_data.text.substr(3, 2);
+ return strtol(buf.c_str(), NULL, 16);
+}
+
+static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) {
+ char buf[7];
+ int result = snprintf(buf, sizeof(buf), "<0x%02X>", ch);
+ GGML_ASSERT(0 <= result && result < 7);
+ return vocab.token_to_id.at(buf);
}
-//
-// tokenizer
-//
+static void llama_escape_whitespace(std::string & text) {
+ replace_all(text, " ", "\xe2\x96\x81");
+}
-static size_t utf8_len(char src) {
- const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
- uint8_t highbits = static_cast<uint8_t>(src) >> 4;
- return lookup[highbits];
+static void llama_unescape_whitespace(std::string & word) {
+ replace_all(word, "\xe2\x96\x81", " ");
}
-struct llama_sp_symbol {
+struct llm_symbol {
using index = int;
index prev;
index next;
size_t n;
};
-static_assert(std::is_trivially_copyable<llama_sp_symbol>::value, "llama_sp_symbol is not trivially copyable");
+static_assert(std::is_trivially_copyable<llm_symbol>::value, "llm_symbol is not trivially copyable");
+
+// SPM tokenizer
+// original implementation:
+// https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
-struct llama_sp_bigram {
+struct llm_bigram_spm {
struct comparator {
- bool operator()(llama_sp_bigram & l, llama_sp_bigram & r) {
+ bool operator()(llm_bigram_spm & l, llm_bigram_spm & r) {
return (l.score < r.score) || (l.score == r.score && l.left > r.left);
}
};
- using queue_storage = std::vector<llama_sp_bigram>;
- using queue = std::priority_queue<llama_sp_bigram, queue_storage, comparator>;
- llama_sp_symbol::index left;
- llama_sp_symbol::index right;
+ using queue_storage = std::vector<llm_bigram_spm>;
+ using queue = std::priority_queue<llm_bigram_spm, queue_storage, comparator>;
+ llm_symbol::index left;
+ llm_symbol::index right;
float score;
size_t size;
};
-// original implementation:
-// https://github.com/ggerganov/llama.cpp/commit/074bea2eb1f1349a0118239c4152914aecaa1be4
-struct llama_tokenizer {
- llama_tokenizer(const llama_vocab & vocab): vocab_(vocab) {}
+struct llm_tokenizer_spm {
+ llm_tokenizer_spm(const llama_vocab & vocab): vocab(vocab) {}
void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
// split string into utf8 chars
int index = 0;
size_t offs = 0;
while (offs < text.size()) {
- llama_sp_symbol sym;
- size_t char_len = std::min(text.size() - offs, utf8_len(text[offs]));
+ llm_symbol sym;
+ size_t len = utf8_len(text[offs]);
sym.text = text.c_str() + offs;
- sym.n = char_len;
- offs += char_len;
+ sym.n = std::min(len, text.size() - offs);
+ offs += sym.n;
sym.prev = index - 1;
sym.next = offs == text.size() ? -1 : index + 1;
index++;
- symbols_.emplace_back(sym);
+ symbols.emplace_back(sym);
}
// seed the work queue with all possible 2-character tokens.
- for (size_t i = 1; i < symbols_.size(); ++i) {
+ for (size_t i = 1; i < symbols.size(); ++i) {
try_add_bigram(i - 1, i);
}
// keep substituting the highest frequency pairs for as long as we can.
- while (!work_queue_.empty()) {
- auto bigram = work_queue_.top();
- work_queue_.pop();
+ while (!work_queue.empty()) {
+ auto bigram = work_queue.top();
+ work_queue.pop();
- auto & left_sym = symbols_[bigram.left];
- auto & right_sym = symbols_[bigram.right];
+ auto & left_sym = symbols[bigram.left];
+ auto & right_sym = symbols[bigram.right];
// if one of the symbols already got merged, skip it.
if (left_sym.n == 0 || right_sym.n == 0 ||
left_sym.n += right_sym.n;
right_sym.n = 0;
- //printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
+ //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size);
// remove the right sym from the chain
left_sym.next = right_sym.next;
if (right_sym.next >= 0) {
- symbols_[right_sym.next].prev = bigram.left;
+ symbols[right_sym.next].prev = bigram.left;
}
// find more substitutions
try_add_bigram(bigram.left, left_sym.next);
}
- for (int i = 0; i != -1; i = symbols_[i].next) {
- auto & symbol = symbols_[i];
- auto token = vocab_.token_to_id.find(std::string(symbol.text, symbol.n));
+ for (int i = 0; i != -1; i = symbols[i].next) {
+ auto & symbol = symbols[i];
+ resegment(symbol, output);
+ }
+ }
+
+private:
+ void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
+ auto text = std::string(symbol.text, symbol.n);
+ auto token = vocab.token_to_id.find(text);
- if (token == vocab_.token_to_id.end()) {
- // output any symbols that did not form tokens as bytes.
- for (int j = 0; j < (int) symbol.n; ++j) {
- llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3;
- output.push_back(token_id);
- }
- } else {
- output.push_back((*token).second);
+ // Do we need to support is_unused?
+ if (token != vocab.token_to_id.end()) {
+ output.push_back((*token).second);
+ return;
+ }
+
+ const auto p = rev_merge.find(text);
+
+ if (p == rev_merge.end()) {
+ // output any symbols that did not form tokens as bytes.
+ for (int j = 0; j < (int)symbol.n; ++j) {
+ llama_vocab::id token_id = llama_byte_to_token(vocab, symbol.text[j]);
+ output.push_back(token_id);
}
+ return;
}
+
+ resegment(symbols[p->second.first], output);
+ resegment(symbols[p->second.second], output);
}
-private:
void try_add_bigram(int left, int right) {
if (left == -1 || right == -1) {
return;
}
- const std::string text = std::string(symbols_[left].text, symbols_[left].n + symbols_[right].n);
- auto token = vocab_.token_to_id.find(text);
+ const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
+ auto token = vocab.token_to_id.find(text);
+
+ if (token == vocab.token_to_id.end()) {
+ return;
+ }
+
+ if (static_cast<size_t>((*token).second) >= vocab.id_to_token.size()) {
+ return;
+ }
+
+ const auto & tok_data = vocab.id_to_token[(*token).second];
+
+ llm_bigram_spm bigram;
+ bigram.left = left;
+ bigram.right = right;
+ bigram.score = tok_data.score;
+ bigram.size = text.size();
+
+ work_queue.push(bigram);
+
+ // Do we need to support is_unused?
+ rev_merge[text] = std::make_pair(left, right);
+ }
+
+ const llama_vocab & vocab;
+
+ std::vector<llm_symbol> symbols;
+ llm_bigram_spm::queue work_queue;
+
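+ // records, for every merged piece of text, the pair of symbols it was built from, so resegment() can split it again when the merge itself is not in the vocab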
+ std::map<std::string, std::pair<int, int>> rev_merge;
+};
+
+// BPE tokenizer
+// adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License]
+// the unicode handling has been simplified, so it most likely does not work 100% correctly!
+
+// TODO: there are a lot of common parts between spm and bpe tokenizers, should be refactored and reused
+
+struct llm_bigram_bpe {
+ struct comparator {
+ bool operator()(const llm_bigram_bpe & l, const llm_bigram_bpe & r) const {
+ return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
+ }
+ };
+
+ using queue_storage = std::vector<llm_bigram_bpe>;
+ using queue = std::priority_queue<llm_bigram_bpe, queue_storage, comparator>;
+ llm_symbol::index left;
+ llm_symbol::index right;
+ std::string text;
+ int rank;
+ size_t size;
+};
+
+struct llm_tokenizer_bpe {
+ llm_tokenizer_bpe(const llama_vocab & vocab): vocab(vocab) {}
+
+ void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
+ int final_prev_index = -1;
+ auto word_collection = bpe_gpt2_preprocess(text);
+
+ symbols_final.clear();
+
+ for (auto & word : word_collection) {
+ work_queue = llm_bigram_bpe::queue();
+ symbols.clear();
+
+ int index = 0;
+ size_t offset = 0;
+
+ while (offset < word.size()) {
+ llm_symbol sym;
+ size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
+ sym.text = word.c_str() + offset;
+ sym.n = char_len;
+ offset += sym.n;
+ sym.prev = index - 1;
+ sym.next = offset == word.size() ? -1 : index + 1;
+ index++;
+ symbols.emplace_back(sym);
+ }
+ for (size_t i = 1; i < symbols.size(); ++i) {
+ add_new_bigram(i - 1, i);
+ }
+
+ // build token(s)
+ while (!work_queue.empty()) {
+ auto bigram = work_queue.top();
+ work_queue.pop();
+
+ auto & left_symbol = symbols[bigram.left];
+ auto & right_symbol = symbols[bigram.right];
+
+ if (left_symbol.n == 0 || right_symbol.n == 0) {
+ continue;
+ }
+ std::string left_token = std::string(left_symbol.text, left_symbol.n);
+ std::string right_token = std::string(right_symbol.text, right_symbol.n);
+ if (left_token + right_token != bigram.text) {
+ continue; // Skip this bigram if it's outdated
+ }
+
+ // merge the right sym into the left one
+ left_symbol.n += right_symbol.n;
+ right_symbol.n = 0;
+
+ // remove the right sym from the chain
+ left_symbol.next = right_symbol.next;
+ if (right_symbol.next >= 0) {
+ symbols[right_symbol.next].prev = bigram.left;
+ }
+
+ add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
+ add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
+ }
+
+ // add the finished tokens to the final list keeping correct order for next and prev
+ for (auto & sym : symbols) {
+ if (sym.n > 0) {
+ sym.prev = final_prev_index;
+ sym.next = -1;
+ if (final_prev_index != -1) {
+ symbols_final[final_prev_index].next = symbols_final.size();
+ }
+ symbols_final.emplace_back(sym);
+ final_prev_index = symbols_final.size() - 1;
+ }
+ }
+ }
+
+ symbols = symbols_final;
+
+ if (!symbols.empty()) {
+ for (int i = 0; i != -1; i = symbols[i].next) {
+ auto & symbol = symbols[i];
+ if (symbol.n == 0) {
+ continue;
+ }
+
+ const std::string str = std::string(symbol.text, symbol.n);
+ const auto token = vocab.token_to_id.find(str);
+
+ if (token == vocab.token_to_id.end()) {
+ for (auto j = str.begin(); j != str.end(); ++j) {
+ std::string byte_str(1, *j);
+ auto token_multibyte = vocab.token_to_id.find(byte_str);
+ if (token_multibyte == vocab.token_to_id.end()) {
+ try {
+ llama_token token_byte = llama_byte_to_token(vocab, *j);
+ output.push_back(token_byte);
+ } catch (const std::out_of_range & err) {
+ fprintf(stderr, "ERROR: byte not found in vocab: '%s'\n", byte_str.c_str());
+ }
+ } else {
+ output.push_back((*token_multibyte).second);
+ }
+ }
+ } else {
+ output.push_back((*token).second);
+ }
+ }
+ }
+ }
- if (token == vocab_.token_to_id.end()) {
+private:
+ void add_new_bigram(int left, int right) {
+ if (left == -1 || right == -1) {
return;
}
- if (static_cast<size_t>((*token).second) >= vocab_.id_to_token.size()) {
+ std::string left_token = std::string(symbols[left].text, symbols[left].n);
+ std::string right_token = std::string(symbols[right].text, symbols[right].n);
+
+ int rank_found = -1;
+
+ rank_found = vocab.find_bpe_rank(left_token, right_token);
+
+ if (rank_found < 0) {
return;
}
- const auto &tok_score = vocab_.id_to_token[(*token).second];
+ llm_bigram_bpe bigram;
- llama_sp_bigram bigram;
- bigram.left = left;
+ bigram.left = left;
bigram.right = right;
- bigram.score = tok_score.score;
- bigram.size = text.size();
- work_queue_.push(bigram);
+ bigram.text = left_token + right_token;
+ bigram.size = left_token.size() + right_token.size();
+ bigram.rank = rank_found;
+
+ work_queue.push(bigram);
+ }
+
+ // probably not 100% correct
+ static std::vector<std::string> bpe_gpt2_preprocess(const std::string & text) {
+ std::vector<std::string> words;
+
+ // ref: https://github.com/openai/gpt-2/blob/a74da5d99abaaba920de8131d64da2862a8f213b/src/encoder.py#L53
+ const std::string pattern = R"('s|'t|'re|'ve|'m|'ll|'d| ?[[:alpha:]]+| ?[[:digit:]]+| ?[^\s[:alpha:][:digit:]]+|\s+(?!\S)|\s+)";
+ const std::regex re(pattern);
+
+ auto words_begin = std::sregex_iterator(text.begin(), text.end(), re);
+ auto words_end = std::sregex_iterator();
+ auto n_words = std::distance(words_begin, words_end);
+ words.reserve(n_words);
+ for (auto it = words_begin; it != words_end; ++it) {
+ words.push_back(it->str());
+ }
+ return words;
+
}
- const llama_vocab & vocab_;
- std::vector<llama_sp_symbol> symbols_;
- llama_sp_bigram::queue work_queue_;
+ const llama_vocab & vocab;
+
+ std::vector<llm_symbol> symbols;
+ std::vector<llm_symbol> symbols_final;
+
+ llm_bigram_bpe::queue work_queue;
};
-static std::vector<llama_vocab::id> llama_tokenize(const llama_vocab & vocab, const std::string & text, bool bos) {
- llama_tokenizer tokenizer(vocab);
+static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, std::string raw_text, bool bos) {
std::vector<llama_vocab::id> output;
- if (text.empty()) {
- return output;
+ // OG tokenizer behavior:
+ //
+ // tokenizer.encode('', add_bos=True) returns [1]
+ // tokenizer.encode('', add_bos=False) returns []
+
+ if (bos && vocab.special_bos_id != -1) {
+ output.push_back(vocab.special_bos_id);
}
- if (bos) {
- output.push_back(llama_token_bos());
+ if (raw_text.empty()) {
+ return output;
}
- tokenizer.tokenize(text, output);
+ switch (vocab.type) {
+ case LLAMA_VOCAB_TYPE_SPM:
+ {
+ // without adding this leading whitespace, we do not get the same results as the original tokenizer
+ raw_text = " " + raw_text;
+
+ llm_tokenizer_spm tokenizer(vocab);
+ llama_escape_whitespace(raw_text);
+ tokenizer.tokenize(raw_text, output);
+ } break;
+ case LLAMA_VOCAB_TYPE_BPE:
+ {
+ llm_tokenizer_bpe tokenizer(vocab);
+ tokenizer.tokenize(raw_text, output);
+ } break;
+ };
+
return output;
}
+//
+// grammar - internal
+//
+
+struct llama_partial_utf8 {
+ uint32_t value; // bit value so far (unshifted)
+ int n_remain; // num bytes remaining; -1 indicates invalid sequence
+};
+
+struct llama_grammar {
+ const std::vector<std::vector<llama_grammar_element>> rules;
+ std::vector<std::vector<const llama_grammar_element *>> stacks;
+
+ // buffer for partially generated UTF-8 sequence from accepted tokens
+ llama_partial_utf8 partial_utf8;
+};
+
+struct llama_grammar_candidate {
+ size_t index;
+ const uint32_t * code_points;
+ llama_partial_utf8 partial_utf8;
+};
+
+// Decodes a UTF-8 string which may end in an incomplete sequence. Appends a terminating 0 so the
+// result can be scanned with a raw pointer. If an invalid sequence is encountered, returns
+// `llama_partial_utf8.n_remain == -1`.
+std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
+ const char * src,
+ llama_partial_utf8 partial_start) {
+ static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
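+ // high nibble of a leading byte -> total sequence length; 0 marks a continuation byte (rejected below when it appears in leading position)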
+ const char * pos = src;
+ std::vector<uint32_t> code_points;
+ uint32_t value = partial_start.value;
+ int n_remain = partial_start.n_remain;
+
+ // continue previous decode, if applicable
+ while (*pos != 0 && n_remain > 0) {
+ uint8_t next_byte = static_cast<uint8_t>(*pos);
+ if ((next_byte >> 6) != 2) {
+ // invalid sequence, abort
+ code_points.push_back(0);
+ return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, -1 });
+ }
+ value = (value << 6) + (next_byte & 0x3F);
+ ++pos;
+ --n_remain;
+ }
+
+ if (partial_start.n_remain > 0 && n_remain == 0) {
+ code_points.push_back(value);
+ }
+
+ // decode any subsequent utf-8 sequences, which may end in an incomplete one
+ while (*pos != 0) {
+ uint8_t first_byte = static_cast<uint8_t>(*pos);
+ uint8_t highbits = first_byte >> 4;
+ n_remain = lookup[highbits] - 1;
+
+ if (n_remain < 0) {
+ // invalid sequence, abort
+ code_points.clear();
+ code_points.push_back(0);
+ return std::make_pair(std::move(code_points), llama_partial_utf8{ 0, n_remain });
+ }
+
+ uint8_t mask = (1 << (7 - n_remain)) - 1;
+ value = first_byte & mask;
+ ++pos;
+ while (*pos != 0 && n_remain > 0) {
+ value = (value << 6) + (static_cast<uint8_t>(*pos) & 0x3F);
+ ++pos;
+ --n_remain;
+ }
+ if (n_remain == 0) {
+ code_points.push_back(value);
+ }
+ }
+ code_points.push_back(0);
+
+ return std::make_pair(std::move(code_points), llama_partial_utf8{ value, n_remain });
+}
+
+// returns true iff pos points to the end of one of the definitions of a rule
+static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) {
+ switch (pos->type) {
+ case LLAMA_GRETYPE_END: return true; // NOLINT
+ case LLAMA_GRETYPE_ALT: return true; // NOLINT
+ default: return false;
+ }
+}
+
+// returns true iff chr satisfies the char range at pos (regular or inverse range)
+// asserts that pos is pointing to a char range element
+static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char(
+ const llama_grammar_element * pos,
+ const uint32_t chr) {
+
+ bool found = false;
+ bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
+
+ GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); // NOLINT
+
+ do {
+ if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
+ // inclusive range, e.g. [a-z]
+ found = found || (pos->value <= chr && chr <= pos[1].value);
+ pos += 2;
+ } else {
+ // exact char match, e.g. [a] or "a"
+ found = found || pos->value == chr;
+ pos += 1;
+ }
+ } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
+
+ return std::make_pair(found == is_positive_char, pos);
+}
+
+// returns true iff some continuation of the given partial UTF-8 sequence could satisfy the char
+// range at pos (regular or inverse range)
+// asserts that pos is pointing to a char range element
+static bool llama_grammar_match_partial_char(
+ const llama_grammar_element * pos,
+ const llama_partial_utf8 partial_utf8) {
+
+ bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR;
+ GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT);
+
+ uint32_t partial_value = partial_utf8.value;
+ int n_remain = partial_utf8.n_remain;
+
+ // invalid sequence or 7-bit char split across 2 bytes (overlong)
+ if (n_remain < 0 || (n_remain == 1 && partial_value < 2)) {
+ return false;
+ }
+
+ // range of possible code points this partial UTF-8 sequence could complete to
+ uint32_t low = partial_value << (n_remain * 6);
+ uint32_t high = low | ((1 << (n_remain * 6)) - 1);
+
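+ // low == 0 means the bits seen so far are all zero; raise the lower bound to the smallest code point a 3- or 4-byte sequence may legally encode (ruling out overlong encodings)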
+ if (low == 0) {
+ if (n_remain == 2) {
+ low = 1 << 11;
+ } else if (n_remain == 3) {
+ low = 1 << 16;
+ }
+ }
+
+ do {
+ if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) {
+ // inclusive range, e.g. [a-z]
+ if (pos->value <= high && low <= pos[1].value) {
+ return is_positive_char;
+ }
+ pos += 2;
+ } else {
+ // exact char match, e.g. [a] or "a"
+ if (low <= pos->value && pos->value <= high) {
+ return is_positive_char;
+ }
+ pos += 1;
+ }
+ } while (pos->type == LLAMA_GRETYPE_CHAR_ALT);
+
+ return !is_positive_char;
+}
+
+
+// transforms a grammar pushdown stack into N possible stacks, all ending
+// at a character range (terminal element)
+static void llama_grammar_advance_stack(
+ const std::vector<std::vector<llama_grammar_element>> & rules,
+ const std::vector<const llama_grammar_element *> & stack,
+ std::vector<std::vector<const llama_grammar_element *>> & new_stacks) {
+
+ if (stack.empty()) {
+ new_stacks.emplace_back(stack);
+ return;
+ }
+
+ const llama_grammar_element * pos = stack.back();
+
+ switch (pos->type) {
+ case LLAMA_GRETYPE_RULE_REF: {
+ const size_t rule_id = static_cast<size_t>(pos->value);
+ const llama_grammar_element * subpos = rules[rule_id].data();
+ do {
+ // init new stack without the top (pos)
+ std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
+ if (!llama_grammar_is_end_of_sequence(pos + 1)) {
+ // if this rule ref is followed by another element, add that to stack
+ new_stack.push_back(pos + 1);
+ }
+ if (!llama_grammar_is_end_of_sequence(subpos)) {
+ // if alternate is nonempty, add to stack
+ new_stack.push_back(subpos);
+ }
+ llama_grammar_advance_stack(rules, new_stack, new_stacks);
+ while (!llama_grammar_is_end_of_sequence(subpos)) {
+ // scan to end of alternate def
+ subpos++;
+ }
+ if (subpos->type == LLAMA_GRETYPE_ALT) {
+ // there's another alternate def of this rule to process
+ subpos++;
+ } else {
+ break;
+ }
+ } while (true);
+ break;
+ }
+ case LLAMA_GRETYPE_CHAR:
+ case LLAMA_GRETYPE_CHAR_NOT:
+ new_stacks.emplace_back(stack);
+ break;
+ default:
+ // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range
+ // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on
+ // those
+ GGML_ASSERT(false);
+ }
+}
+
+// takes a set of possible pushdown stacks on a grammar, which are required to
+// be positioned at a character range (see `llama_grammar_advance_stack`), and
+// produces the N possible stacks if the given char is accepted at those
+// positions
+static std::vector<std::vector<const llama_grammar_element *>> llama_grammar_accept(
+ const std::vector<std::vector<llama_grammar_element>> & rules,
+ const std::vector<std::vector<const llama_grammar_element *>> & stacks,
+ const uint32_t chr) {
+
+ std::vector<std::vector<const llama_grammar_element *>> new_stacks;
+
+ for (const auto & stack : stacks) {
+ if (stack.empty()) {
+ continue;
+ }
+
+ auto match = llama_grammar_match_char(stack.back(), chr);
+ if (match.first) {
+ const llama_grammar_element * pos = match.second;
+
+ // update top of stack to next element, if any
+ std::vector<const llama_grammar_element *> new_stack(stack.begin(), stack.end() - 1);
+ if (!llama_grammar_is_end_of_sequence(pos)) {
+ new_stack.push_back(pos);
+ }
+ llama_grammar_advance_stack(rules, new_stack, new_stacks);
+ }
+ }
+
+ return new_stacks;
+}
+
+static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
+ const std::vector<std::vector<llama_grammar_element>> & rules,
+ const std::vector<std::vector<const llama_grammar_element *>> & stacks,
+ const std::vector<llama_grammar_candidate> & candidates);
+
+static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_stack(
+ const std::vector<std::vector<llama_grammar_element>> & rules,
+ const std::vector<const llama_grammar_element *> & stack,
+ const std::vector<llama_grammar_candidate> & candidates) {
+
+ std::vector<llama_grammar_candidate> rejects;
+
+ if (stack.empty()) {
+ for (auto tok : candidates) {
+ if (*tok.code_points != 0 || tok.partial_utf8.n_remain != 0) {
+ rejects.push_back(tok);
+ }
+ }
+ return rejects;
+ }
+
+ const llama_grammar_element * stack_pos = stack.back();
+
+ std::vector<llama_grammar_candidate> next_candidates;
+ for (auto tok : candidates) {
+ if (*tok.code_points == 0) {
+ // reached end of full codepoints in token, reject iff it ended in a partial sequence
+ // that cannot satisfy this position in grammar
+ if (tok.partial_utf8.n_remain != 0 &&
+ !llama_grammar_match_partial_char(stack_pos, tok.partial_utf8)) {
+ rejects.push_back(tok);
+ }
+ } else if (llama_grammar_match_char(stack_pos, *tok.code_points).first) {
+ next_candidates.push_back({ tok.index, tok.code_points + 1, tok.partial_utf8 });
+ } else {
+ rejects.push_back(tok);
+ }
+ }
+
+ const auto * stack_pos_after = llama_grammar_match_char(stack_pos, 0).second;
+
+ // update top of stack to next element, if any
+ std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1);
+ if (!llama_grammar_is_end_of_sequence(stack_pos_after)) {
+ stack_after.push_back(stack_pos_after);
+ }
+ std::vector<std::vector<const llama_grammar_element *>> next_stacks;
+ llama_grammar_advance_stack(rules, stack_after, next_stacks);
+
+ auto next_rejects = llama_grammar_reject_candidates(rules, next_stacks, next_candidates);
+ for (auto tok : next_rejects) {
+ rejects.push_back({ tok.index, tok.code_points - 1, tok.partial_utf8 });
+ }
+
+ return rejects;
+}
+
+static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates(
+ const std::vector<std::vector<llama_grammar_element>> & rules,
+ const std::vector<std::vector<const llama_grammar_element *>> & stacks,
+ const std::vector<llama_grammar_candidate> & candidates) {
+ GGML_ASSERT(!stacks.empty()); // REVIEW
+
+ if (candidates.empty()) {
+ return std::vector<llama_grammar_candidate>();
+ }
+
+ auto rejects = llama_grammar_reject_candidates_for_stack(rules, stacks.front(), candidates);
+
+ for (size_t i = 1, size = stacks.size(); i < size; ++i) {
+ rejects = llama_grammar_reject_candidates_for_stack(rules, stacks[i], rejects);
+ }
+ return rejects;
+}
+
+//
+// grammar - external
+//
+
+struct llama_grammar * llama_grammar_init(
+ const llama_grammar_element ** rules,
+ size_t n_rules,
+ size_t start_rule_index) {
+ const llama_grammar_element * pos;
+
+ // copy rule definitions into vectors
+ std::vector<std::vector<llama_grammar_element>> vec_rules(n_rules);
+ for (size_t i = 0; i < n_rules; i++) {
+ for (pos = rules[i]; pos->type != LLAMA_GRETYPE_END; pos++) {
+ vec_rules[i].push_back(*pos);
+ }
+ vec_rules[i].push_back({LLAMA_GRETYPE_END, 0});
+ }
+
+ // loop over alternates of start rule to build initial stacks
+ std::vector<std::vector<const llama_grammar_element *>> stacks;
+ pos = rules[start_rule_index];
+ do {
+ std::vector<const llama_grammar_element *> stack;
+ if (!llama_grammar_is_end_of_sequence(pos)) {
+ // if alternate is nonempty, add to stack
+ stack.push_back(pos);
+ }
+ llama_grammar_advance_stack(vec_rules, stack, stacks);
+ while (!llama_grammar_is_end_of_sequence(pos)) {
+ // scan to end of alternate def
+ pos++;
+ }
+ if (pos->type == LLAMA_GRETYPE_ALT) {
+ // there's another alternate def of this rule to process
+ pos++;
+ } else {
+ break;
+ }
+ } while (true);
+
+ return new llama_grammar{ std::move(vec_rules), std::move(stacks), {} };
+}
+
+void llama_grammar_free(struct llama_grammar * grammar) {
+ delete grammar;
+}
+
+struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar) {
+ llama_grammar * result = new llama_grammar{ grammar->rules, grammar->stacks, grammar->partial_utf8 };
+
+ // redirect elements in stacks to point to new rules
+ for (size_t is = 0; is < result->stacks.size(); is++) {
+ for (size_t ie = 0; ie < result->stacks[is].size(); ie++) {
+ for (size_t ir0 = 0; ir0 < grammar->rules.size(); ir0++) {
+ for (size_t ir1 = 0; ir1 < grammar->rules[ir0].size(); ir1++) {
+ if (grammar->stacks[is][ie] == &grammar->rules[ir0][ir1]) {
+ result->stacks[is][ie] = &result->rules[ir0][ir1];
+ }
+ }
+ }
+ }
+ }
+
+ return result;
+}
+
//
// sampling
//
void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) {
- assert(candidates->size > 0);
+ GGML_ASSERT(candidates->size > 0);
const int64_t t_start_sample_us = ggml_time_us();
return;
}
- const int64_t t_start_sample_us = ggml_time_us();
-
llama_sample_softmax(ctx, candidates);
+ const int64_t t_start_sample_us = ggml_time_us();
+
// Compute the cumulative probabilities
float cum_sum = 0.0f;
size_t last_idx = candidates->size;
for (size_t i = 0; i < candidates->size; ++i) {
cum_sum += candidates->data[i].p;
- // Check if the running sum is greater than p or if we have kept at least min_keep tokens
- if (cum_sum > p && i >= min_keep) {
- last_idx = i;
+ // Check if the running sum is at least p or if we have kept at least min_keep tokens
+ // we set the last index to i+1 to indicate that the current iterate should be included in the set
+ if (cum_sum >= p && i + 1 >= min_keep) {
+ last_idx = i + 1;
break;
}
}
return;
}
- const int64_t t_start_sample_us = ggml_time_us();
-
llama_sample_softmax(nullptr, candidates);
+ const int64_t t_start_sample_us = ggml_time_us();
// Compute the first and second derivatives
std::vector<float> first_derivatives(candidates->size - 1);
// Calculate absolute value of second derivatives
for (size_t i = 0; i < second_derivatives.size(); ++i) {
- second_derivatives[i] = abs(second_derivatives[i]);
+ second_derivatives[i] = std::abs(second_derivatives[i]);
}
// Normalize the second derivatives
- float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
- for (float & value : second_derivatives) {
- value /= second_derivatives_sum;
+ {
+ const float second_derivatives_sum = std::accumulate(second_derivatives.begin(), second_derivatives.end(), 0.0f);
+
+ if (second_derivatives_sum > 1e-6f) {
+ for (float & value : second_derivatives) {
+ value /= second_derivatives_sum;
+ }
+ } else {
+ for (float & value : second_derivatives) {
+ value = 1.0f / second_derivatives.size();
+ }
+ }
}
float cum_sum = 0.0f;
}
}
-
void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) {
// Reference implementation:
// https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr
return;
}
- const int64_t t_start_sample_us = ggml_time_us();
-
// Compute the softmax of logits and calculate entropy
llama_sample_softmax(nullptr, candidates);
+ const int64_t t_start_sample_us = ggml_time_us();
+
float entropy = 0.0f;
for (size_t i = 0; i < candidates->size; ++i) {
entropy += -candidates->data[i].p * logf(candidates->data[i].p);
}
}
+void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) {
+ GGML_ASSERT(ctx);
+ const int64_t t_start_sample_us = ggml_time_us();
+
+ bool allow_eos = false;
+ for (const auto & stack : grammar->stacks) {
+ if (stack.empty()) {
+ allow_eos = true;
+ break;
+ }
+ }
+
+ const llama_token eos = llama_token_eos(ctx);
+
+ std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
+ std::vector<llama_grammar_candidate> candidates_grammar;
+
+ for (size_t i = 0; i < candidates->size; ++i) {
+ const llama_token id = candidates->data[i].id;
+ const std::string piece = llama_token_to_str(ctx, id);
+ if (id == eos) {
+ if (!allow_eos) {
+ candidates->data[i].logit = -INFINITY;
+ }
+ } else if (piece.empty() || piece[0] == 0) {
+ candidates->data[i].logit = -INFINITY;
+ } else {
+ candidates_decoded.push_back(decode_utf8(piece.c_str(), grammar->partial_utf8));
+ candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second });
+ }
+ }
+
+ const auto rejects = llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar);
+ for (const auto & reject : rejects) {
+ candidates->data[reject.index].logit = -INFINITY;
+ }
+
+ ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+}
+
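+ // convert logits to log-probabilities in place (max-subtracted for numerical stability)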
+static void llama_log_softmax(float * array, size_t size) {
+ float max_l = *std::max_element(array, array + size);
+ float sum = 0.f;
+ for (size_t i = 0; i < size; ++i) {
+ float p = expf(array[i] - max_l);
+ sum += p;
+ array[i] = p;
+ }
+
+ for (size_t i = 0; i < size; ++i) {
+ array[i] = logf(array[i] / sum);
+ }
+}
+
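+// Classifier-free guidance combines the logits of the main context with those of a
+// guidance context: after log-softmax, the adjusted logit is g + scale * (b - g), where
+// b and g are the base and guidance log-probabilities. With scale == 1 the base
+// distribution is reproduced unchanged; larger values push the result further away
+// from the guidance model.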
+void llama_sample_classifier_free_guidance(
+ struct llama_context * ctx,
+ llama_token_data_array * candidates,
+ struct llama_context * guidance_ctx,
+ float scale) {
+ int64_t t_start_sample_us = ggml_time_us();
+
+ GGML_ASSERT(ctx);
+
+ auto n_vocab = llama_n_vocab(ctx);
+
+ GGML_ASSERT(n_vocab == (int)candidates->size);
+ GGML_ASSERT(!candidates->sorted);
+
+ std::vector<float> logits_base;
+ logits_base.reserve(candidates->size);
+ for (size_t i = 0; i < candidates->size; ++i) {
+ logits_base.push_back(candidates->data[i].logit);
+ }
+ llama_log_softmax(logits_base.data(), candidates->size);
+
+    float * logits_guidance = llama_get_logits(guidance_ctx);
+ llama_log_softmax(logits_guidance, n_vocab);
+
+ for (int i = 0; i < n_vocab; ++i) {
+ float logit_guidance = logits_guidance[i];
+ float logit_base = logits_base[i];
+ candidates->data[i].logit = scale * (logit_base - logit_guidance) + logit_guidance;
+ }
+
+ if (ctx) {
+ ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+ }
+}
llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu) {
- assert(ctx);
+ GGML_ASSERT(ctx);
+
auto N = float(llama_n_vocab(ctx));
int64_t t_start_sample_us;
t_start_sample_us = ggml_time_us();
if (ctx) {
ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
- ctx->n_sample++;
}
return X;
}
llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
- assert(ctx);
int64_t t_start_sample_us;
t_start_sample_us = ggml_time_us();
return -log2f(candidate.p) > *mu;
}));
- // Normalize the probabilities of the remaining words
- llama_sample_softmax(ctx, candidates);
+ if (candidates->size == 0) {
+ candidates->size = 1;
+ }
- // Sample the next word X from the remaining words
if (ctx) {
ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
}
+
+ // Normalize the probabilities of the remaining words
+ llama_sample_softmax(ctx, candidates);
+
+ // Sample the next word X from the remaining words
llama_token X = llama_sample_token(ctx, candidates);
t_start_sample_us = ggml_time_us();
}
llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) {
- assert(ctx);
+ GGML_ASSERT(ctx);
+
const int64_t t_start_sample_us = ggml_time_us();
llama_sample_softmax(nullptr, candidates);
return result;
}
+void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) {
+ const int64_t t_start_sample_us = ggml_time_us();
+
+ if (token == llama_token_eos(ctx)) {
+ for (const auto & stack : grammar->stacks) {
+ if (stack.empty()) {
+ return;
+ }
+ }
+ GGML_ASSERT(false);
+ }
+
+ const std::string piece = llama_token_to_str(ctx, token);
+
+ // Note terminating 0 in decoded string
+ const auto decoded = decode_utf8(piece.c_str(), grammar->partial_utf8);
+ const auto & code_points = decoded.first;
+ for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
+ grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
+ }
+ grammar->partial_utf8 = decoded.second;
+ GGML_ASSERT(!grammar->stacks.empty());
+
+ ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+}
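+
+// Illustrative usage sketch (not part of the upstream sources): grammar-constrained
+// sampling alternates between masking the candidates and advancing the grammar, e.g.
+//
+//     llama_sample_grammar(ctx, &candidates, grammar);   // mask tokens the grammar rejects
+//     const llama_token id = llama_sample_token(ctx, &candidates);
+//     llama_grammar_accept_token(ctx, grammar, id);      // advance the grammar stacks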
+
+//
+// Beam search
+//
+
+struct llama_beam {
+ std::vector<llama_token> tokens;
+ float p; // Cumulative beam probability (renormalized relative to all beams)
+ bool eob; // Initialize end-of-beam to false. Callback sets this to true.
+ // Sort beams by probability. In case of ties, prefer beams at eob.
+ bool operator<(const llama_beam & rhs) const {
+ return std::make_pair(p, eob) < std::make_pair(rhs.p, rhs.eob);
+ }
+ // Shift off first n tokens and discard them.
+ void shift_tokens(const size_t n) {
+ if (n) {
+ std::copy(tokens.begin() + n, tokens.end(), tokens.begin());
+ tokens.resize(tokens.size() - n);
+ }
+ }
+ llama_beam_view view() const { return {tokens.data(), tokens.size(), p, eob}; }
+};
+
+// A struct for calculating logit-related info.
+struct llama_logit_info {
+ const float * const logits;
+ const int n_vocab;
+ const float max_l;
+ const float normalizer;
+ struct sum_exp {
+ float max_l;
+ float operator()(float sum, float l) const { return sum + std::exp(l - max_l); }
+ };
+ llama_logit_info(llama_context * ctx)
+ : logits(llama_get_logits(ctx))
+ , n_vocab(llama_n_vocab(ctx))
+ , max_l(*std::max_element(logits, logits + n_vocab))
+ , normalizer(1.0f / std::accumulate(logits, logits + n_vocab, 0.0f, sum_exp{max_l}))
+ { }
+ llama_token_data get_token_data(const llama_token token_id) const {
+ constexpr auto p = std::numeric_limits<float>::quiet_NaN(); // never used
+ return {token_id, logits[token_id], p};
+ }
+ // Return top k token_data by logit.
+ std::vector<llama_token_data> top_k(size_t k) {
+ std::vector<llama_token_data> min_heap; // min-heap by logit
+ const llama_token k_min = std::min(static_cast<llama_token>(k), n_vocab);
+ min_heap.reserve(k_min);
+ for (llama_token token_id = 0 ; token_id < k_min ; ++token_id) {
+ min_heap.push_back(get_token_data(token_id));
+ }
+ auto comp = [](const llama_token_data & a, const llama_token_data & b) { return a.logit > b.logit; };
+ std::make_heap(min_heap.begin(), min_heap.end(), comp);
+ for (llama_token token_id = k_min ; token_id < n_vocab ; ++token_id) {
+ if (min_heap.front().logit < logits[token_id]) {
+ std::pop_heap(min_heap.begin(), min_heap.end(), comp);
+ min_heap.back().id = token_id;
+ min_heap.back().logit = logits[token_id];
+ std::push_heap(min_heap.begin(), min_heap.end(), comp);
+ }
+ }
+ return min_heap;
+ }
+ float probability_from_logit(float logit) const {
+ return normalizer * std::exp(logit - max_l);
+ }
+};
+
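+// Bookkeeping for a single beam-search run: the currently active beams, the
+// next-iteration beams being assembled, and the beam_views shared with the caller's
+// callback on every iteration.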
+struct llama_beam_search_data {
+ llama_context * ctx;
+ size_t n_beams;
+ int n_past;
+ int n_predict;
+ int n_threads;
+ std::vector<llama_beam> beams;
+ std::vector<llama_beam> next_beams;
+
+ // Re-calculated on each loop iteration
+ size_t common_prefix_length;
+
+ // Used to communicate to/from callback on beams state.
+ std::vector<llama_beam_view> beam_views;
+
+ llama_beam_search_data(llama_context * ctx, size_t n_beams, int n_past, int n_predict, int n_threads)
+ : ctx(ctx)
+ , n_beams(n_beams)
+ , n_past(n_past)
+ , n_predict(n_predict)
+ , n_threads(n_threads)
+ , beam_views(n_beams) {
+ beams.reserve(n_beams);
+ next_beams.reserve(n_beams);
+ }
+
+ // Collapse beams to a single beam given by index.
+ void collapse_beams(const size_t beam_idx) {
+ if (0u < beam_idx) {
+ std::swap(beams[0], beams[beam_idx]);
+ }
+ beams.resize(1);
+ }
+
+ // Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
+    // The repetitive patterns below reflect the two stages of the heaps:
+ // * Gather elements until the vector is full, then call std::make_heap() on it.
+ // * If the heap is full and a new element is found that should be included, pop the
+ // least element to the back(), replace it with the new, then push it into the heap.
+ void fill_next_beams_by_top_probabilities(llama_beam & beam) {
+ // Min-heaps use a greater-than comparator.
+ const auto comp = [](const llama_beam & a, const llama_beam & b) { return a.p > b.p; };
+ if (beam.eob) {
+ // beam is at end-of-sentence, so just copy it to next_beams if its probability is high enough.
+ if (next_beams.size() < n_beams) {
+ next_beams.push_back(std::move(beam));
+ if (next_beams.size() == n_beams) {
+ std::make_heap(next_beams.begin(), next_beams.end(), comp);
+ }
+ } else if (next_beams.front().p < beam.p) {
+ std::pop_heap(next_beams.begin(), next_beams.end(), comp);
+ next_beams.back() = std::move(beam);
+ std::push_heap(next_beams.begin(), next_beams.end(), comp);
+ }
+ } else {
+ // beam is not at end-of-sentence, so branch with next top_k tokens.
+ if (!beam.tokens.empty()) {
+ llama_eval(ctx, beam.tokens.data(), beam.tokens.size(), n_past, n_threads);
+ }
+ llama_logit_info logit_info(ctx);
+ std::vector<llama_token_data> next_tokens = logit_info.top_k(n_beams);
+ size_t i=0;
+ if (next_beams.size() < n_beams) {
+ for (; next_beams.size() < n_beams ; ++i) {
+ llama_beam next_beam = beam;
+ next_beam.tokens.push_back(next_tokens[i].id);
+ next_beam.p *= logit_info.probability_from_logit(next_tokens[i].logit);
+ next_beams.push_back(std::move(next_beam));
+ }
+ std::make_heap(next_beams.begin(), next_beams.end(), comp);
+ } else {
+ for (; next_beams.front().p == 0.0f ; ++i) {
+ std::pop_heap(next_beams.begin(), next_beams.end(), comp);
+ next_beams.back() = beam;
+ next_beams.back().tokens.push_back(next_tokens[i].id);
+ next_beams.back().p *= logit_info.probability_from_logit(next_tokens[i].logit);
+ std::push_heap(next_beams.begin(), next_beams.end(), comp);
+ }
+ }
+ for (; i < n_beams ; ++i) {
+ const float next_p = beam.p * logit_info.probability_from_logit(next_tokens[i].logit);
+ if (next_beams.front().p < next_p) {
+ std::pop_heap(next_beams.begin(), next_beams.end(), comp);
+ next_beams.back() = beam;
+ next_beams.back().tokens.push_back(next_tokens[i].id);
+ next_beams.back().p = next_p;
+ std::push_heap(next_beams.begin(), next_beams.end(), comp);
+ }
+ }
+ }
+ }
+
+ // Find common_prefix_length based on beams.
+ // Requires beams is not empty.
+ size_t find_common_prefix_length() {
+ size_t common_prefix_length = beams[0].tokens.size();
+ for (size_t i = 1 ; i < beams.size() ; ++i) {
+ common_prefix_length = std::min(common_prefix_length, beams[i].tokens.size());
+ for (size_t j = 0 ; j < common_prefix_length ; ++j) {
+ if (beams[0].tokens[j] != beams[i].tokens[j]) {
+ common_prefix_length = j;
+ break;
+ }
+ }
+ }
+ return common_prefix_length;
+ }
+
+ // Construct beams_state to send back to caller via the callback function.
+ // Side effect: set common_prefix_length = find_common_prefix_length();
+ llama_beams_state get_beams_state(const bool last_call) {
+ for (size_t i = 0 ; i < beams.size() ; ++i) {
+ beam_views[i] = beams[i].view();
+ }
+ common_prefix_length = find_common_prefix_length();
+ return {beam_views.data(), beams.size(), common_prefix_length, last_call};
+ }
+
+ // Loop:
+ // * while i < n_predict, AND
+ // * any of the beams have not yet reached end-of-beam (eob), AND
+ // * the highest probability beam(s) (plural in case of ties) are not at end-of-sentence
+ // (since all other beam probabilities can only decrease)
+ void loop(const llama_beam_search_callback_fn_t callback, void * const callback_data) {
+ beams.push_back({{}, 1.0f, false}); // Start with one empty beam w/ probability = 1.0 and !eob.
+ const auto not_eob = [](const llama_beam & beam) { return !beam.eob; };
+ for (int i = 0 ; i < n_predict && std::any_of(beams.begin(),beams.end(),not_eob) &&
+ !beams[top_beam_index()].eob ; ++i) {
+ callback(callback_data, get_beams_state(false)); // Sets common_prefix_length
+ update_beams_from_beam_views(); // Update values (p,eob) that callback may have changed.
+ if (common_prefix_length) {
+ llama_eval(ctx, beams[0].tokens.data(), common_prefix_length, n_past, n_threads);
+ n_past += common_prefix_length;
+ }
+            // Zero out next_beam probabilities to place them last in the following min-heap.
+ std::for_each(next_beams.begin(), next_beams.end(), [](llama_beam & beam) { beam.p = 0.0f; });
+ for (llama_beam & beam : beams) {
+ beam.shift_tokens(common_prefix_length);
+ fill_next_beams_by_top_probabilities(beam);
+ }
+ // next_beams become the beams of next/final iteration. Swap them to re-use memory.
+ beams.swap(next_beams);
+ renormalize_beam_probabilities(beams);
+ }
+ collapse_beams(top_beam_index());
+ callback(callback_data, get_beams_state(true));
+ }
+
+ // As beams grow, the cumulative probabilities decrease.
+ // Renormalize them to avoid floating point underflow.
+ static void renormalize_beam_probabilities(std::vector<llama_beam> & beams) {
+ const auto sum_p = [](float sum, llama_beam & beam) { return sum + beam.p; };
+ const float inv_sum = 1.0f / std::accumulate(beams.begin(), beams.end(), 0.0f, sum_p);
+ std::for_each(beams.begin(), beams.end(), [=](llama_beam & beam) { beam.p *= inv_sum; });
+ }
+
+ // Assumes beams is non-empty. Uses llama_beam::operator<() for ordering.
+ size_t top_beam_index() {
+ return std::max_element(beams.begin(), beams.end()) - beams.begin();
+ }
+
+ // Copy (p,eob) for each beam which may have been changed by the callback.
+ void update_beams_from_beam_views() {
+ for (size_t i = 0 ; i < beams.size() ; ++i) {
+ beams[i].p = beam_views[i].p;
+ beams[i].eob = beam_views[i].eob;
+ }
+ }
+};
+
+void llama_beam_search(llama_context * ctx,
+ llama_beam_search_callback_fn_t callback, void * callback_data,
+ size_t n_beams, int n_past, int n_predict, int n_threads) {
+ assert(ctx);
+ const int64_t t_start_sample_us = ggml_time_us();
+
+ llama_beam_search_data beam_search_data(ctx, n_beams, n_past, n_predict, n_threads);
+
+ beam_search_data.loop(callback, callback_data);
+
+ ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+ ctx->n_sample++;
+}
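+
+// Illustrative usage sketch (not part of the upstream sources). It assumes the
+// llama_beams_state / llama_beam_view fields declared in llama.h (beam_views, n_beams,
+// tokens, n_tokens, eob) and passes the context through callback_data:
+//
+//     static void beam_cb(void * data, llama_beams_state state) {
+//         llama_context * cb_ctx = (llama_context *) data;
+//         for (size_t i = 0; i < state.n_beams; ++i) {
+//             llama_beam_view & bv = state.beam_views[i];
+//             if (!bv.eob && bv.n_tokens > 0 && bv.tokens[bv.n_tokens - 1] == llama_token_eos(cb_ctx)) {
+//                 bv.eob = true; // mark the beam as finished
+//             }
+//         }
+//     }
+//
+//     llama_beam_search(ctx, beam_cb, ctx, /*n_beams=*/ 4, n_past, /*n_predict=*/ 64, n_threads);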
+
//
// quantization
//
-static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, enum llama_ftype ftype, int nthread) {
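+// no_init<T> deliberately skips value-initialization, so that resizing the large
+// scratch vectors used during quantization does not spend time zero-filling memory
+// that is about to be overwritten anyway.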
+template <typename T>
+struct no_init {
+ T value;
+ no_init() { /* do nothing */ }
+};
+
+static void llama_convert_tensor_internal(
+ struct ggml_tensor * tensor, std::vector<no_init<float>> & output, std::vector<std::thread> & workers,
+ const size_t nelements, const int nthread
+) {
+ if (output.size() < nelements) {
+ output.resize(nelements);
+ }
+ float * f32_output = (float *) output.data();
+
+ ggml_type_traits_t qtype;
+ if (ggml_is_quantized(tensor->type)) {
+ qtype = ggml_internal_get_type_traits(tensor->type);
+ if (qtype.to_float == NULL) {
+ throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
+ }
+ } else if (tensor->type != GGML_TYPE_F16) {
+ throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
+ }
+
+ if (nthread < 2) {
+ if (tensor->type == GGML_TYPE_F16) {
+ ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
+ } else if (ggml_is_quantized(tensor->type)) {
+ qtype.to_float(tensor->data, f32_output, nelements);
+ } else {
+ GGML_ASSERT(false); // unreachable
+ }
+ return;
+ }
+
+ auto block_size = tensor->type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor->type);
+ auto block_size_bytes = ggml_type_size(tensor->type);
+
+ GGML_ASSERT(nelements % block_size == 0);
+ auto nblocks = nelements / block_size;
+ auto blocks_per_thread = nblocks / nthread;
+ auto spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
+
+ for (auto tnum = 0, in_buff_offs = 0, out_buff_offs = 0; tnum < nthread; tnum++) {
+ auto thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
+ auto thr_elems = thr_blocks * block_size; // number of elements for this thread
+ auto thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
+
+ auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
+ if (typ == GGML_TYPE_F16) {
+ ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
+ } else {
+ qtype.to_float(inbuf, outbuf, nels);
+ }
+ };
+ workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
+ in_buff_offs += thr_block_bytes;
+ out_buff_offs += thr_elems;
+ }
+ for (auto & w : workers) { w.join(); }
+ workers.clear();
+}
+
+#ifdef GGML_USE_K_QUANTS
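+// Refine the requested quantization type per tensor: the choice depends on the tensor
+// name (attn_v, ffn_down, attn_output, ...), the model architecture, and the layer
+// index, and tensors whose row size is not a multiple of QK_K fall back to F16 / Q4_0.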
+static ggml_type get_k_quant_type(
+ ggml_type new_type, const ggml_tensor * tensor, const llama_model & model, llama_ftype ftype, int * i_attention_wv,
+ int n_attention_wv, int * i_feed_forward_w2, int n_feed_forward_w2
+) {
+ const std::string name = ggml_get_name(tensor);
+ // TODO: avoid hardcoded tensor names - use the TN_* constants
+ const auto tn = LLM_TN(model.arch);
+
+ auto use_more_bits = [](int i_layer, int num_layers) -> bool {
+ return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2;
+ };
+
+ if (name == tn(LLM_TENSOR_OUTPUT, "weight")) {
+ int nx = tensor->ne[0];
+ if (model.arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
+ new_type = GGML_TYPE_Q8_0;
+ }
+ else if (new_type != GGML_TYPE_Q8_0) {
+ new_type = GGML_TYPE_Q6_K;
+ }
+ } else if (name.find("attn_v.weight") != std::string::npos) {
+ if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
+ new_type = *i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
+ }
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
+ else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
+ use_more_bits(*i_attention_wv, n_attention_wv)) new_type = GGML_TYPE_Q6_K;
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && *i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
+ else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
+ (*i_attention_wv < n_attention_wv/8 || *i_attention_wv >= 7*n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
+ if (model.type == MODEL_70B) {
+ // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
+ // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
+ // nearly negligible increase in model size by quantizing this tensor with more bits:
+ if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
+ }
+ ++*i_attention_wv;
+ } else if (name.find("ffn_down.weight") != std::string::npos) {
+ if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
+ new_type = *i_feed_forward_w2 < 2 ? GGML_TYPE_Q5_K
+ : model.arch != LLM_ARCH_FALCON || use_more_bits(*i_feed_forward_w2, n_feed_forward_w2) ? GGML_TYPE_Q4_K
+ : GGML_TYPE_Q3_K;
+ }
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
+ new_type = model.arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
+ }
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
+ if (model.arch == LLM_ARCH_FALCON) {
+ new_type = *i_feed_forward_w2 < 2 ? GGML_TYPE_Q6_K :
+ use_more_bits(*i_feed_forward_w2, n_feed_forward_w2) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
+ } else {
+ if (use_more_bits(*i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
+ }
+ }
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(*i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K;
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && model.arch != LLM_ARCH_FALCON && *i_feed_forward_w2 < 4) {
+ new_type = GGML_TYPE_Q5_K;
+ }
+ ++*i_feed_forward_w2;
+ } else if (name.find("attn_output.weight") != std::string::npos) {
+ if (model.arch != LLM_ARCH_FALCON) {
+ if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K ) new_type = GGML_TYPE_Q3_K;
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) new_type = GGML_TYPE_Q4_K;
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
+ } else {
+ if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
+ }
+ }
+ else if (name.find("attn_qkv.weight") != std::string::npos) {
+ if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
+ }
+ else if (name.find("ffn_gate.weight") != std::string::npos || name.find("ffn_up.weight") != std::string::npos) {
+ if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+ }
+ // This can be used to reduce the size of the Q5_K_S model.
+ // The associated PPL increase is fully in line with the size reduction
+ //else {
+ // if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
+ //}
+ bool convert_incompatible_tensor = false;
+ if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K ||
+ new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) {
+ int nx = tensor->ne[0];
+ int ny = tensor->ne[1];
+ if (nx % QK_K != 0) {
+ LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for k-quants\n", __func__, nx, ny, QK_K);
+ convert_incompatible_tensor = true;
+ }
+ }
+ if (convert_incompatible_tensor) {
+ if (name == tn(LLM_TENSOR_OUTPUT, "weight")) {
+ new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing.
+ LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n");
+ } else if (name == tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
+ new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing.
+ LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n");
+ } else {
+ throw std::runtime_error("Unsupported tensor size encountered\n");
+ }
+ }
+
+ return new_type;
+}
+#endif
+
+static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
ggml_type quantized_type;
- switch (ftype) {
+ llama_ftype ftype = params->ftype;
+
+ switch (params->ftype) {
case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break;
case LLAMA_FTYPE_MOSTLY_Q4_1: quantized_type = GGML_TYPE_Q4_1; break;
case LLAMA_FTYPE_MOSTLY_Q5_0: quantized_type = GGML_TYPE_Q5_0; break;
case LLAMA_FTYPE_MOSTLY_Q5_1: quantized_type = GGML_TYPE_Q5_1; break;
case LLAMA_FTYPE_MOSTLY_Q8_0: quantized_type = GGML_TYPE_Q8_0; break;
- default: throw format("invalid output file type %d\n", ftype);
- };
+ case LLAMA_FTYPE_MOSTLY_F16: quantized_type = GGML_TYPE_F16; break;
+ case LLAMA_FTYPE_ALL_F32: quantized_type = GGML_TYPE_F32; break;
+
+#ifdef GGML_USE_K_QUANTS
+ // K-quants
+ case LLAMA_FTYPE_MOSTLY_Q2_K: quantized_type = GGML_TYPE_Q2_K; break;
+ case LLAMA_FTYPE_MOSTLY_Q3_K_S:
+ case LLAMA_FTYPE_MOSTLY_Q3_K_M:
+ case LLAMA_FTYPE_MOSTLY_Q3_K_L: quantized_type = GGML_TYPE_Q3_K; break;
+ case LLAMA_FTYPE_MOSTLY_Q4_K_S:
+ case LLAMA_FTYPE_MOSTLY_Q4_K_M: quantized_type = GGML_TYPE_Q4_K; break;
+ case LLAMA_FTYPE_MOSTLY_Q5_K_S:
+ case LLAMA_FTYPE_MOSTLY_Q5_K_M: quantized_type = GGML_TYPE_Q5_K; break;
+ case LLAMA_FTYPE_MOSTLY_Q6_K: quantized_type = GGML_TYPE_Q6_K; break;
+#endif
+ default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
+ }
+
+ int nthread = params->nthread;
if (nthread <= 0) {
nthread = std::thread::hardware_concurrency();
}
- std::unique_ptr<llama_model_loader> model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false,
- /*vocab_only*/ false));
- llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loaders.at(0).get(), ftype);
+ std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname_inp, /*use_mmap*/ false));
+
+ llama_model model;
+ llm_load_arch(*ml, model);
+ llm_load_hparams(*ml, model, 0, 0, 0);
+
+ if (params->only_copy) {
+ ftype = model.ftype;
+ }
+
+ const size_t align = GGUF_DEFAULT_ALIGNMENT;
+ struct gguf_context * ctx_out = gguf_init_empty();
+
+ // copy the KV pairs from the input file
+ gguf_set_kv (ctx_out, ml->ctx_gguf);
+ gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
+ gguf_set_val_u32(ctx_out, "general.file_type", ftype);
+
+#ifdef GGML_USE_K_QUANTS
+ int n_attention_wv = 0;
+ int n_feed_forward_w2 = 0;
+
+ for (int i = 0; i < ml->n_tensors; ++i) {
+ struct ggml_tensor * meta = ml->get_tensor_meta(i);
+
+ const std::string name = ggml_get_name(meta);
+
+ // TODO: avoid hardcoded tensor names - use the TN_* constants
+ if (name.find("attn_v.weight") != std::string::npos) {
+ ++n_attention_wv;
+ }
+ else if (name.find("ffn_down.weight") != std::string::npos) {
+ ++n_feed_forward_w2;
+ }
+ }
+ if (n_attention_wv != n_feed_forward_w2 || (uint32_t)n_attention_wv != model.hparams.n_layer) {
+ LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_feed_forward_w2 = %d, hparams.n_layer = %d\n",
+ __func__, n_attention_wv, n_feed_forward_w2, model.hparams.n_layer);
+ }
+
+ int i_attention_wv = 0;
+ int i_feed_forward_w2 = 0;
+#endif
size_t total_size_org = 0;
size_t total_size_new = 0;
std::vector<int64_t> hist_all(1 << 4, 0);
std::vector<std::thread> workers;
+ workers.reserve(nthread);
std::mutex mutex;
- size_t idx = 0;
- for (llama_load_tensor & tensor : model_loader->tensors_map.tensors) {
- llama_buffer read_data;
- read_data.resize(tensor.size);
- tensor.data = read_data.addr;
- model_loader->load_data_for(tensor);
+ int idx = 0;
+
+ std::vector<no_init<uint8_t>> read_data;
+ std::vector<no_init<uint8_t>> work;
+ std::vector<no_init<float>> f32_conv_buf;
+
+    // add the original tensor metadata to ctx_out so the size of the meta section can be computed and reserved
+ for (int i = 0; i < ml->n_tensors; ++i) {
+ struct ggml_tensor * meta = ml->get_tensor_meta(i);
+ gguf_add_tensor(ctx_out, meta);
+ }
+
+ std::ofstream fout(fname_out, std::ios::binary);
+
+ const size_t meta_size = gguf_get_meta_size(ctx_out);
+
+ LLAMA_LOG_INFO("%s: meta size = %zu bytes\n", __func__, meta_size);
+
+ // placeholder for the meta data
+ ::zeros(fout, meta_size);
- printf("[%4zu/%4zu] %36s - %16s, type = %6s, ",
- ++idx, model_loader->tensors_map.tensors.size(),
- tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(),
- ggml_type_name(tensor.type));
+ for (int i = 0; i < ml->n_tensors; ++i) {
+ struct ggml_tensor * tensor = ml->get_tensor_meta(i);
+
+ const std::string name = ggml_get_name(tensor);
+
+ if (read_data.size() < ggml_nbytes(tensor)) {
+ read_data.resize(ggml_nbytes(tensor));
+ }
+ tensor->data = read_data.data();
+ ml->load_data_for(tensor);
+
+ LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
+ ++idx, ml->n_tensors,
+ ggml_get_name(tensor),
+ llama_format_tensor_shape(tensor).c_str(),
+ ggml_type_name(tensor->type));
// This used to be a regex, but <regex> has an extreme cost to compile times.
- bool quantize = tensor.name.rfind("weight") == tensor.name.size() - 6; // ends with 'weight'?
+ bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
// quantize only 2D tensors
- quantize &= (tensor.ne.size() == 2);
-
- // uncomment this to keep the output layer in FP16
- //if (tensor.name == "output.weight") {
- // quantize = false;
- //}
+ quantize &= (tensor->n_dims == 2);
+ quantize &= params->quantize_output_tensor || name != "output.weight";
+ quantize &= !params->only_copy;
enum ggml_type new_type;
void * new_data;
size_t new_size;
- llama_buffer work;
+ if (quantize) {
+ new_type = quantized_type;
+#ifdef GGML_USE_K_QUANTS
+ new_type = get_k_quant_type(
+ new_type, tensor, model, ftype, &i_attention_wv, n_attention_wv, &i_feed_forward_w2, n_feed_forward_w2
+ );
+#endif
+ // If we've decided to quantize to the same type the tensor is already
+ // in then there's nothing to do.
+ quantize = tensor->type != new_type;
+ }
if (!quantize) {
- new_type = tensor.type;
- new_data = tensor.data;
- new_size = tensor.size;
- printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0);
+ new_type = tensor->type;
+ new_data = tensor->data;
+ new_size = ggml_nbytes(tensor);
+ LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
} else {
- new_type = quantized_type;
+ const size_t nelements = ggml_nelements(tensor);
+
float * f32_data;
- size_t nelements = tensor.ne.at(0) * tensor.ne.at(1);
- llama_buffer f32_conv_buf;
- if (tensor.type == GGML_TYPE_F32) {
- f32_data = (float *) tensor.data;
- } else if (tensor.type == GGML_TYPE_F16) {
- f32_conv_buf.resize(nelements * sizeof(float));
- f32_data = (float *) f32_conv_buf.addr;
- const auto * f16_data = (const ggml_fp16_t *) tensor.data;
- for (size_t i = 0; i < nelements; i++) {
- f32_data[i] = ggml_fp16_to_fp32(f16_data[i]);
- }
+
+ if (tensor->type == GGML_TYPE_F32) {
+ f32_data = (float *) tensor->data;
+ } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
+ throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
} else {
- throw format("type %s unsupported for integer quantization", ggml_type_name(tensor.type));
+ llama_convert_tensor_internal(tensor, f32_conv_buf, workers, nelements, nthread);
+ f32_data = (float *) f32_conv_buf.data();
}
- printf("quantizing .. ");
+ LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
fflush(stdout);
- work.resize(nelements * 4); // upper bound on size
- new_data = work.addr;
- std::vector<int64_t> hist_cur(1 << 4, 0);
+ if (work.size() < nelements * 4) {
+ work.resize(nelements * 4); // upper bound on size
+ }
+ new_data = work.data();
+ std::array<int64_t, 1 << 4> hist_cur = {};
- int chunk_size = 32 * 512;
+ static const int chunk_size = 32 * 512;
const int nchunk = (nelements + chunk_size - 1)/chunk_size;
const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
if (nthread_use < 2) {
} else {
size_t counter = 0;
new_size = 0;
- auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size] () {
- std::vector<int64_t> local_hist;
+ auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements]() {
+ std::array<int64_t, 1 << 4> local_hist = {};
size_t local_size = 0;
while (true) {
std::unique_lock<std::mutex> lock(mutex);
size_t first = counter; counter += chunk_size;
if (first >= nelements) {
- if (!local_hist.empty()) {
+ if (local_size > 0) {
for (int j=0; j<int(local_hist.size()); ++j) {
hist_cur[j] += local_hist[j];
}
}
lock.unlock();
size_t last = std::min(nelements, first + chunk_size);
- if (local_hist.empty()) {
- local_hist.resize(hist_cur.size(), 0);
- }
local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
}
};
- if ((int) workers.size() < nthread_use - 1) {
- workers.resize(nthread_use - 1);
- }
for (int it = 0; it < nthread_use - 1; ++it) {
- workers[it] = std::thread(compute);
+ workers.emplace_back(compute);
}
compute();
- for (int it = 0; it < nthread_use - 1; ++it) {
- workers[it].join();
- }
+ for (auto & w : workers) { w.join(); }
+ workers.clear();
}
- printf("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0);
+ LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
+ int64_t tot_count = 0;
for (size_t i = 0; i < hist_cur.size(); i++) {
hist_all[i] += hist_cur[i];
+ tot_count += hist_cur[i];
}
- for (size_t i = 0; i < hist_cur.size(); i++) {
- printf("%5.3f ", hist_cur[i] / float(nelements));
- }
- printf("\n");
- }
- total_size_org += tensor.size;
- total_size_new += new_size;
- file_saver.write_tensor(tensor, new_type, new_data, new_size);
- }
-
- printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
- printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
-
- {
- int64_t sum_all = 0;
- for (size_t i = 0; i < hist_all.size(); i++) {
- sum_all += hist_all[i];
- }
-
- printf("%s: hist: ", __func__);
- for (size_t i = 0; i < hist_all.size(); i++) {
- printf("%5.3f ", hist_all[i] / float(sum_all));
- }
- printf("\n");
- }
-}
-
-//
-// interface implementation
-//
-
-struct llama_context * llama_init_from_file(
- const char * path_model,
- struct llama_context_params params) {
- ggml_time_init();
-
- llama_context * ctx = new llama_context;
-
- if (params.seed < 0) {
- params.seed = time(NULL);
- }
-
- unsigned cur_percentage = 0;
- if (params.progress_callback == NULL) {
- params.progress_callback_user_data = &cur_percentage;
- params.progress_callback = [](float progress, void * ctx) {
- unsigned * cur_percentage_p = (unsigned *) ctx;
- unsigned percentage = (unsigned) (100 * progress);
- while (percentage > *cur_percentage_p) {
- *cur_percentage_p = percentage;
- fprintf(stderr, ".");
- fflush(stderr);
- if (percentage >= 100) {
- fprintf(stderr, "\n");
+ if (tot_count > 0) {
+ for (size_t i = 0; i < hist_cur.size(); i++) {
+ LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
}
}
- };
- }
+ LLAMA_LOG_INFO("\n");
+ }
+ total_size_org += ggml_nbytes(tensor);
+ total_size_new += new_size;
- ctx->rng = std::mt19937(params.seed);
- ctx->logits_all = params.logits_all;
+ // update the gguf meta data as we go
+ gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
+ gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size);
- ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
+ // write tensor data + padding
+ fout.write((const char *) new_data, new_size);
+ zeros(fout, GGML_PAD(new_size, align) - new_size);
+ }
- if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_gpu_layers, memory_type,
- params.use_mmap, params.use_mlock, params.vocab_only,
- params.progress_callback, params.progress_callback_user_data)) {
- fprintf(stderr, "%s: failed to load model\n", __func__);
- llama_free(ctx);
- return nullptr;
+ // go back to beginning of file and write the updated meta data
+ {
+ fout.seekp(0);
+ std::vector<uint8_t> data(gguf_get_meta_size(ctx_out));
+ gguf_get_meta_data(ctx_out, data.data());
+ fout.write((const char *) data.data(), data.size());
}
- // reserve memory for context buffers
- if (!params.vocab_only) {
- if (!kv_cache_init(ctx->model.hparams, ctx->model.kv_self, memory_type, ctx->model.hparams.n_ctx)) {
- fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
- llama_free(ctx);
- return nullptr;
- }
+ fout.close();
- {
- const size_t memory_size = ggml_nbytes(ctx->model.kv_self.k) + ggml_nbytes(ctx->model.kv_self.v);
- fprintf(stderr, "%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
- }
+ gguf_free(ctx_out);
- const auto & hparams = ctx->model.hparams;
+ LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
+ LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
- // resized during inference
- if (params.logits_all) {
- ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
- } else {
- ctx->logits.reserve(hparams.n_vocab);
+ // print histogram for all tensors
+ {
+ int64_t sum_all = 0;
+ for (size_t i = 0; i < hist_all.size(); i++) {
+ sum_all += hist_all[i];
}
- if (params.embedding){
- ctx->embedding.resize(hparams.n_embd);
+ if (sum_all > 0) {
+ LLAMA_LOG_INFO("%s: hist: ", __func__);
+ for (size_t i = 0; i < hist_all.size(); i++) {
+ LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all));
+ }
+ LLAMA_LOG_INFO("\n");
}
-
- ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type));
-
- ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0().at(ctx->model.type));
- ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type));
- }
-
- return ctx;
-}
-
-void llama_free(struct llama_context * ctx) {
- delete ctx;
-}
-
-int llama_model_quantize(
- const char * fname_inp,
- const char * fname_out,
- enum llama_ftype ftype,
- int nthread) {
- try {
- llama_model_quantize_internal(fname_inp, fname_out, ftype, nthread);
- return 0;
- } catch (const std::string & err) {
- fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.c_str());
- return 1;
}
}
-int llama_apply_lora_from_file_internal(struct llama_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) {
- fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
-
- auto & model = ctx->model;
+// TODO: after the GGUF PR, this likely won't work and needs to be updated
+int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) {
+ LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
const int64_t t_start_lora_us = ggml_time_us();
auto fin = std::ifstream(path_lora, std::ios::binary);
if (!fin) {
- fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora);
+ LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora);
return 1;
}
{
uint32_t magic;
fin.read((char *) &magic, sizeof(magic));
-        if (magic != LLAMA_FILE_MAGIC_GGLA) {
-            fprintf(stderr, "%s: bad file magic\n", __func__);
-            return 1;
-        }
+        if (magic != LLAMA_FILE_MAGIC_GGLA) {
+            LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
+            return 1;
+        }
uint32_t format_version;
fin.read((char *) &format_version, sizeof(format_version));
if (format_version != 1) {
- fprintf(stderr, "%s: unsupported file version\n", __func__ );
+ LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
return 1;
}
}
fin.read((char *) &lora_alpha, sizeof(lora_alpha));
float scaling = (float)lora_alpha / (float)lora_r;
- fprintf(stderr, "%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
-
+ LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
// create a temporary ggml context to store the lora tensors
// todo: calculate size from biggest possible tensor
// create a name -> tensor map of the model to accelerate lookups
std::unordered_map<std::string, struct ggml_tensor*> model_tensors;
- for (auto & kv: model.tensors_by_name) {
+ for (const auto & kv : model.tensors_by_name) {
model_tensors.insert(kv);
}
-
// load base model
- std::unique_ptr<llama_model_loader> model_loader;
+ std::unique_ptr<llama_model_loader> ml;
ggml_context * base_ctx = NULL;
- llama_buffer base_buf;
+ std::vector<uint8_t> base_buf;
if (path_base_model) {
- fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model);
- model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*vocab_only*/ false));
+ LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
+ ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));
size_t ctx_size;
size_t mmapped_size;
- model_loader->calc_sizes(&ctx_size, &mmapped_size);
+ ml->calc_sizes(ctx_size, mmapped_size);
base_buf.resize(ctx_size);
ggml_init_params base_params;
- base_params.mem_size = base_buf.size;
- base_params.mem_buffer = base_buf.addr;
- base_params.no_alloc = model_loader->use_mmap;
+ base_params.mem_size = base_buf.size();
+ base_params.mem_buffer = base_buf.data();
+ base_params.no_alloc = ml->use_mmap;
base_ctx = ggml_init(base_params);
- model_loader->ggml_ctx = base_ctx;
-
// maybe this should in llama_model_loader
- if (model_loader->use_mmap) {
- model_loader->mapping.reset(new llama_mmap(&model_loader->file_loaders.at(0)->file, /* prefetch */ 0));
+ if (ml->use_mmap) {
+ ml->mapping.reset(new llama_mmap(&ml->file, /* prefetch */ 0, ggml_is_numa()));
}
}
// read tensors and apply
bool warned = false;
int n_tensors = 0;
+
+ std::vector<uint8_t> work_buffer;
+
while (true) {
int32_t n_dims;
int32_t length;
const std::string lora_suffix = ".lora";
size_t pos = name.rfind(lora_suffix);
if (pos == std::string::npos) {
- fprintf(stderr, "%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
+ LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str());
return 1;
}
std::string lora_type = name.substr(pos + lora_suffix.length());
std::string base_name = name;
base_name.erase(pos);
- // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
+ // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
if (model_tensors.find(base_name) == model_tensors.end()) {
- fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
+ LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
return 1;
}
case 1: wtype = GGML_TYPE_F16; break;
default:
{
- fprintf(stderr, "%s: invalid tensor data type '%d'\n",
+ LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n",
__func__, ftype);
return false;
}
}
- ggml_tensor* lora_tensor;
+ ggml_tensor * lora_tensor;
if (n_dims == 2) {
lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
}
else {
- fprintf(stderr, "%s: unsupported tensor dimension %d\n", __func__, n_dims);
+ LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
return 1;
}
+ ggml_set_name(lora_tensor, "lora_tensor");
+
+ // load tensor data
+ size_t offset = fin.tellg();
+ size_t tensor_data_size = ggml_nbytes(lora_tensor);
+ offset = (offset + 31) & -32;
+ fin.seekg(offset);
+ fin.read((char*)lora_tensor->data, tensor_data_size);
+
+ lora_tensors[name] = lora_tensor;
+
+ // check if we have both A and B tensors and apply
+ if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
+ lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
+
+ ggml_tensor * dest_t = model_tensors[base_name];
+
+ offload_func_t offload_func = llama_nop;
+ offload_func_t offload_func_force_inplace = llama_nop;
+
+#ifdef GGML_USE_CUBLAS
+ if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) {
+ if (dest_t->type != GGML_TYPE_F16) {
+ throw std::runtime_error(format(
+ "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models", __func__));
+ }
+ offload_func = ggml_cuda_assign_buffers;
+ offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace;
+ }
+#endif // GGML_USE_CUBLAS
+
+ ggml_tensor * base_t;
+ if (ml) {
+ struct gguf_context * ctx_gguf = ml->ctx_gguf;
+
+ // load from base model
+ if (gguf_find_tensor(ctx_gguf, base_name.c_str()) < 0) {
+ // TODO: throw
+ LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
+ return 1;
+ }
+
+ // TODO: not tested!! maybe not working!
+ base_t = ml->create_tensor(base_ctx, base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU);
+ ml->load_data_for(base_t);
+ } else {
+ base_t = dest_t;
+ }
+
+ if (ggml_is_quantized(base_t->type)) {
+ if (!warned) {
+ LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
+ "use a f16 or f32 base model with --lora-base\n", __func__);
+ warned = true;
+ }
+ }
+
+ ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
+ GGML_ASSERT(loraA->type == GGML_TYPE_F32);
+ ggml_set_name(loraA, "loraA");
+
+ ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
+ GGML_ASSERT(loraB->type == GGML_TYPE_F32);
+ ggml_set_name(loraB, "loraB");
+
+ if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
+ LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
+ " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
+ return 1;
+ }
+
+ // w = w + BA*s
+ ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
+ offload_func(BA);
+ ggml_set_name(BA, "BA");
+
+ if (scaling != 1.0f) {
+ ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling);
+ ggml_set_name(scale_tensor, "scale_tensor");
+
+ BA = ggml_scale_inplace(lora_ctx, BA, scale_tensor);
+ offload_func(BA);
+ ggml_set_name(BA, "BA_scaled");
+ }
+
+ ggml_tensor * r;
+ if (base_t == dest_t) {
+ r = ggml_add_inplace(lora_ctx, dest_t, BA);
+ offload_func_force_inplace(r);
+ ggml_set_name(r, "r_add_inplace");
+ }
+ else {
+ r = ggml_add(lora_ctx, base_t, BA);
+ offload_func(r);
+ ggml_set_name(r, "r_add");
+
+ r = ggml_cpy(lora_ctx, r, dest_t);
+ offload_func(r);
+ ggml_set_name(r, "r_cpy");
+ }
+
+ struct ggml_cgraph gf = ggml_build_forward(r);
+
+ ggml_graph_compute_helper(work_buffer, &gf, n_threads);
+
+ // we won't need these tensors again, reset the context to save memory
+ ggml_free(lora_ctx);
+ lora_ctx = ggml_init(params);
+ lora_tensors.clear();
+
+ n_tensors++;
+ if (n_tensors % 4 == 0) {
+ LLAMA_LOG_INFO(".");
+ }
+ }
+ }
+
+ // TODO: this should be in a destructor, it will leak on failure
+ ggml_free(lora_ctx);
+ if (base_ctx) {
+ ggml_free(base_ctx);
+ }
+
+ const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
+ LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
+
+ return 0;
+}
+
+//
+// interface implementation
+//
+
+struct llama_context_params llama_context_default_params() {
+ struct llama_context_params result = {
+ /*.seed =*/ LLAMA_DEFAULT_SEED,
+ /*.n_ctx =*/ 512,
+ /*.n_batch =*/ 512,
+ /*.n_gpu_layers =*/ 0,
+ /*.main_gpu =*/ 0,
+ /*.tensor_split =*/ nullptr,
+ /*.rope_freq_base =*/ 10000.0f,
+ /*.rope_freq_scale =*/ 1.0f,
+ /*.progress_callback =*/ nullptr,
+ /*.progress_callback_user_data =*/ nullptr,
+ /*.low_vram =*/ false,
+ /*.mul_mat_q =*/ true,
+ /*.f16_kv =*/ true,
+ /*.logits_all =*/ false,
+ /*.vocab_only =*/ false,
+ /*.use_mmap =*/ true,
+ /*.use_mlock =*/ false,
+ /*.embedding =*/ false,
+ };
+
+#ifdef GGML_USE_METAL
+ result.n_gpu_layers = 1;
+#endif
+
+ return result;
+}
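+
+// Illustrative usage sketch (not from the upstream sources; the file name is made up)
+// for the split model / context API:
+//
+//     llama_backend_init(/*numa=*/ false);
+//     llama_context_params cparams = llama_context_default_params();
+//     llama_model   * model = llama_load_model_from_file("model.gguf", cparams);
+//     llama_context * lctx  = model ? llama_new_context_with_model(model, cparams) : NULL;
+//     // ... evaluate and sample ...
+//     llama_free(lctx);
+//     llama_free_model(model);
+//     llama_backend_free();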
+
+struct llama_model_quantize_params llama_model_quantize_default_params() {
+ struct llama_model_quantize_params result = {
+ /*.nthread =*/ 0,
+ /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
+ /*.allow_requantize =*/ false,
+ /*.quantize_output_tensor =*/ true,
+ /*.only_copy =*/ false,
+ };
+
+ return result;
+}
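+
+// Illustrative usage sketch (file names are made up):
+//
+//     llama_model_quantize_params qparams = llama_model_quantize_default_params();
+//     qparams.ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;
+//     llama_model_quantize("model-f16.gguf", "model-q4_k_m.gguf", &qparams);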
+
+int llama_max_devices(void) {
+ return LLAMA_MAX_DEVICES;
+}
+
+bool llama_mmap_supported(void) {
+ return llama_mmap::SUPPORTED;
+}
+
+bool llama_mlock_supported(void) {
+ return llama_mlock::SUPPORTED;
+}
+
+void llama_backend_init(bool numa) {
+ ggml_time_init();
+
+ // needed to initialize f16 tables
+ {
+ struct ggml_init_params params = { 0, NULL, false };
+ struct ggml_context * ctx = ggml_init(params);
+ ggml_free(ctx);
+ }
+
+ if (numa) {
+ ggml_numa_init();
+ }
+
+#ifdef GGML_USE_MPI
+ ggml_mpi_backend_init();
+#endif
+}
+
+void llama_backend_free(void) {
+#ifdef GGML_USE_MPI
+ ggml_mpi_backend_free();
+#endif
+}
+
+int64_t llama_time_us(void) {
+ return ggml_time_us();
+}
+
+struct llama_model * llama_load_model_from_file(
+ const char * path_model,
+ struct llama_context_params params) {
+ ggml_time_init();
+
+ llama_model * model = new llama_model;
+
+ ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
+
+ unsigned cur_percentage = 0;
+ if (params.progress_callback == NULL) {
+ params.progress_callback_user_data = &cur_percentage;
+ params.progress_callback = [](float progress, void * ctx) {
+ unsigned * cur_percentage_p = (unsigned *) ctx;
+ unsigned percentage = (unsigned) (100 * progress);
+ while (percentage > *cur_percentage_p) {
+ *cur_percentage_p = percentage;
+ LLAMA_LOG_INFO(".");
+ if (percentage >= 100) {
+ LLAMA_LOG_INFO("\n");
+ }
+ }
+ };
+ }
+
+ if (!llama_model_load(path_model, *model, params.n_ctx, params.n_batch, params.n_gpu_layers,
+ params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale,
+ params.low_vram, memory_type, params.use_mmap, params.use_mlock, params.vocab_only,
+ params.progress_callback, params.progress_callback_user_data)) {
+ LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
+ delete model;
+ return nullptr;
+ }
+
+ return model;
+}
+
+void llama_free_model(struct llama_model * model) {
+ delete model;
+}
+
+struct llama_context * llama_new_context_with_model(
+ struct llama_model * model,
+ struct llama_context_params params) {
+
+ if (!model) {
+ return nullptr;
+ }
+
+ llama_context * ctx = new llama_context(*model);
+
+ if (params.seed == LLAMA_DEFAULT_SEED) {
+ params.seed = time(NULL);
+ }
+
+ ctx->rng = std::mt19937(params.seed);
+ ctx->logits_all = params.logits_all;
+
+ ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
+
+ // reserve memory for context buffers
+ if (!params.vocab_only) {
+ if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) {
+ LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
+ llama_free(ctx);
+ return nullptr;
+ }
+
+ {
+ const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v);
+ LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
+ }
- // load tensor data
- size_t offset = fin.tellg();
- size_t tensor_data_size = ggml_nbytes(lora_tensor);
- offset = (offset + 31) & -32;
- fin.seekg(offset);
- fin.read((char*)lora_tensor->data, tensor_data_size);
+ const auto & hparams = ctx->model.hparams;
- lora_tensors[name] = lora_tensor;
+ // resized during inference
+ if (params.logits_all) {
+ ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
+ } else {
+ ctx->logits.reserve(hparams.n_vocab);
+ }
- // check if we have both A and B tensors and apply
- if (lora_tensors.find(base_name + ".loraA") != lora_tensors.end() &&
- lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
+ if (params.embedding){
+ ctx->embedding.resize(hparams.n_embd);
+ }
- ggml_tensor * dest_t = model_tensors[base_name];
- ggml_tensor * base_t;
- if (model_loader) {
- // load from base model
- if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) {
- fprintf(stderr, "%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
- return 1;
+ {
+ static const size_t tensor_alignment = 32;
+ // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data
+ ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead());
+
+ // create measure allocator
+ ctx->alloc = ggml_allocr_new_measure(tensor_alignment);
+
+ // build worst-case graph
+ int n_tokens = std::min((int)hparams.n_ctx, params.n_batch);
+ int n_past = hparams.n_ctx - n_tokens;
+ llama_token token = llama_token_bos(ctx); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
+ ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past);
+#ifdef GGML_USE_METAL
+ if (params.n_gpu_layers > 0) {
+ ctx->ctx_metal = ggml_metal_init(1);
+ if (!ctx->ctx_metal) {
+ LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
+ llama_free(ctx);
+ return NULL;
}
- size_t idx = model_loader->tensors_map.name_to_idx[base_name];
- llama_load_tensor & lt = model_loader->tensors_map.tensors[idx];
- base_t = model_loader->get_tensor(base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU);
- lt.data = (uint8_t *) lt.ggml_tensor->data;
- model_loader->load_data_for(lt);
- lt.ggml_tensor->data = lt.data;
- }
- else {
- base_t = dest_t;
+ ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false);
+ ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
}
+#endif
+ // measure memory requirements for the graph
+ size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;
- if (ggml_is_quantized(base_t->type)) {
- if (!warned) {
- fprintf(stderr, "%s: warning: using a lora adapter with a quantized model may result in poor quality, "
- "use a f16 or f32 base model with --lora-base\n", __func__);
- warned = true;
- }
- }
+ LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);
- ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
- ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
+ // recreate allocator with exact memory requirements
+ ggml_allocr_free(ctx->alloc);
- if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
- fprintf(stderr, "%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
- " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
- return 1;
+ ctx->buf_alloc.resize(alloc_size);
+ ctx->alloc = ggml_allocr_new(ctx->buf_alloc.data, ctx->buf_alloc.size, tensor_alignment);
+#ifdef GGML_USE_METAL
+ if (ctx->ctx_metal) {
+ ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
}
+#endif
+#ifdef GGML_USE_CUBLAS
+ if (params.low_vram) {
+ LLAMA_LOG_INFO("%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__);
+ ggml_cuda_set_scratch_size(0); // disable scratch
+ } else {
+ ggml_cuda_set_scratch_size(alloc_size);
+ LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MB\n", __func__, alloc_size / 1024.0 / 1024.0);
+ }
+#endif
+ }
- // w = w + BA*s
- ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
+#ifdef GGML_USE_METAL
+ if (params.n_gpu_layers > 0) {
+ // this allocates all Metal resources and memory buffers
- if (scaling != 1.0f) {
- ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling);
- BA = ggml_scale_inplace(lora_ctx, BA, scale_tensor);
- }
+ void * data_ptr = NULL;
+ size_t data_size = 0;
- ggml_tensor * r;
- if (base_t == dest_t) {
- r = ggml_add_inplace(lora_ctx, dest_t, BA);
- }
- else {
- r = ggml_add(lora_ctx, base_t, BA);
- r = ggml_cpy(lora_ctx, r, dest_t);
+ if (params.use_mmap) {
+ data_ptr = ctx->model.mapping->addr;
+ data_size = ctx->model.mapping->size;
+ } else {
+ data_ptr = ggml_get_mem_buffer(ctx->model.ctx);
+ data_size = ggml_get_mem_size (ctx->model.ctx);
}
- struct ggml_cgraph gf = ggml_build_forward(r);
- ggml_graph_compute_with_ctx(lora_ctx, &gf, n_threads);
+ const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx);
- // we won't need these tensors again, reset the context to save memory
- ggml_free(lora_ctx);
- lora_ctx = ggml_init(params);
- lora_tensors.clear();
+ LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
- n_tensors++;
- if (n_tensors % 4 == 0) {
- fprintf(stderr, ".");
+#define LLAMA_METAL_CHECK_BUF(result) \
+ if (!(result)) { \
+ LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \
+ llama_free(ctx); \
+ return NULL; \
}
+
+ LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
+
+ LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.data, ctx->buf_compute.size, 0));
+ LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0));
+
+ LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", ctx->buf_alloc.data, ctx->buf_alloc.size, 0));
+#undef LLAMA_METAL_CHECK_BUF
}
+#endif
}
- // TODO: this should be in a destructor, it will leak on failure
- ggml_free(lora_ctx);
- if (base_ctx) {
- ggml_free(base_ctx);
+#ifdef GGML_USE_MPI
+ ctx->ctx_mpi = ggml_mpi_init();
+
+ if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
+ // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
+ const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
+ while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
+ llama_backend_free();
+ exit(1);
}
+#endif
- const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
- fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0);
+ return ctx;
+}
- return 0;
+struct llama_context * llama_init_from_file(
+ const char * path_model,
+ struct llama_context_params params) {
+ struct llama_model * model = llama_load_model_from_file(path_model, params);
+ if (!model) {
+ return nullptr;
+ }
+
+ struct llama_context * ctx = llama_new_context_with_model(model, params);
+ ctx->model_owner = true;
+
+ return ctx;
+}
+
+void llama_free(struct llama_context * ctx) {
+ delete ctx;
+}
+
+int llama_n_vocab(const struct llama_context * ctx) {
+ return llama_model_n_vocab(&ctx->model);
+}
+
+int llama_n_ctx(const struct llama_context * ctx) {
+ return llama_model_n_ctx(&ctx->model);
+}
+
+int llama_n_ctx_train(const struct llama_context * ctx) {
+ return llama_model_n_ctx_train(&ctx->model);
+}
+
+int llama_n_embd(const struct llama_context * ctx) {
+ return llama_model_n_embd(&ctx->model);
+}
+
+enum llama_vocab_type llama_vocab_type(const struct llama_context * ctx) {
+ return ctx->model.vocab.type;
+}
+
+int llama_model_n_vocab(const struct llama_model * model) {
+ return model->vocab.id_to_token.size();
+}
+
+int llama_model_n_ctx(const struct llama_model * model) {
+ return model->hparams.n_ctx;
+}
+
+int llama_model_n_ctx_train(const struct llama_model * model) {
+ return model->hparams.n_ctx_train;
+}
+
+int llama_model_n_embd(const struct llama_model * model) {
+ return model->hparams.n_embd;
+}
+
+int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
+ return snprintf(buf, buf_size, "%s %s %s",
+ model->name.c_str(),
+ llama_model_type_name(model->type),
+ llama_model_ftype_name(model->ftype).c_str());
+}
+
+uint64_t llama_model_size(const struct llama_model * model) {
+ uint64_t size = 0;
+ for (const auto & it : model->tensors_by_name) {
+ size += ggml_nbytes(it.second);
+ }
+ return size;
+}
+
+uint64_t llama_model_n_params(const struct llama_model * model) {
+ uint64_t nparams = 0;
+ for (const auto & it : model->tensors_by_name) {
+ nparams += ggml_nelements(it.second);
+ }
+ return nparams;
+}
+
+int llama_model_quantize(
+ const char * fname_inp,
+ const char * fname_out,
+ const llama_model_quantize_params * params) {
+ try {
+ llama_model_quantize_internal(fname_inp, fname_out, params);
+ return 0;
+ } catch (const std::exception & err) {
+ LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
+ return 1;
+ }
}
int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) {
try {
- return llama_apply_lora_from_file_internal(ctx, path_lora, path_base_model, n_threads);
- } catch (const std::string & err) {
- fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.c_str());
+ return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads);
+ } catch (const std::exception & err) {
+ LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
+ return 1;
+ }
+}
+
+int llama_model_apply_lora_from_file(const struct llama_model * model, const char * path_lora, const char * path_base_model, int n_threads) {
+ try {
+ return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads);
+ } catch (const std::exception & err) {
+ LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
return 1;
}
}
int llama_get_kv_cache_token_count(const struct llama_context * ctx) {
- return ctx->model.kv_self.n;
+ return ctx->kv_self.n;
}
#define LLAMA_MAX_RNG_STATE (64*1024)
-void llama_set_rng_seed(struct llama_context * ctx, int seed) {
- if (seed < 0) {
+void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed) {
+ if (seed == LLAMA_DEFAULT_SEED) {
seed = time(NULL);
}
ctx->rng.seed(seed);
const size_t s_embedding = ctx->embedding.size() * sizeof(float);
const size_t s_kv_size = sizeof(size_t);
const size_t s_kv_ntok = sizeof(int);
- const size_t s_kv = ctx->model.kv_self.buf.size;
+ const size_t s_kv = ctx->kv_self.buf.size;
const size_t s_total = (
+ s_rng_size
return s_total;
}
-// Copies the state to the specified destination address
-size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
- uint8_t * out = dst;
+// llama_data_context: abstract writer used to stream state data either into a buffer or to a file
+struct llama_data_context {
+ virtual void write(const void * src, size_t size) = 0;
+ virtual size_t get_size_written() = 0;
+ virtual ~llama_data_context() = default;
+};
+
+struct llama_data_buffer_context : llama_data_context {
+ uint8_t * ptr;
+ size_t size_written = 0;
+
+ llama_data_buffer_context(uint8_t * p) : ptr(p) {}
+
+ void write(const void * src, size_t size) override {
+ memcpy(ptr, src, size);
+ ptr += size;
+ size_written += size;
+ }
+
+ size_t get_size_written() override {
+ return size_written;
+ }
+};
+
+struct llama_data_file_context : llama_data_context {
+ llama_file * file;
+ size_t size_written = 0;
+
+ llama_data_file_context(llama_file * f) : file(f) {}
+
+ void write(const void * src, size_t size) override {
+ file->write_raw(src, size);
+ size_written += size;
+ }
+
+ size_t get_size_written() override {
+ return size_written;
+ }
+};
+
+/** Copy state data into either a buffer or a file, depending on the passed-in context.
+ *
+ * file context:
+ *   llama_file file("/path", "wb");
+ *   llama_data_file_context data_ctx(&file);
+ *   llama_copy_state_data_internal(ctx, &data_ctx);
+ *
+ * buffer context:
+ *   std::vector<uint8_t> buf(max_size, 0);
+ *   llama_data_buffer_context data_ctx(buf.data());
+ *   llama_copy_state_data_internal(ctx, &data_ctx);
+ */
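+// serialized layout (in order): rng state; logits (capacity, size, data, zero padding up to capacity);
+// embeddings (size, data); kv cache (buffer size, token count, k tensor, v tensor)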
+void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) {
// copy rng
{
std::stringstream rng_ss;
memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE);
memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size());
- memcpy(out, &rng_size, sizeof(rng_size)); out += sizeof(rng_size);
- memcpy(out, &rng_buf[0], LLAMA_MAX_RNG_STATE); out += LLAMA_MAX_RNG_STATE;
+ data_ctx->write(&rng_size, sizeof(rng_size));
+ data_ctx->write(&rng_buf[0], LLAMA_MAX_RNG_STATE);
}
// copy logits
const size_t logits_cap = ctx->logits.capacity();
const size_t logits_size = ctx->logits.size();
- memcpy(out, &logits_cap, sizeof(logits_cap)); out += sizeof(logits_cap);
- memcpy(out, &logits_size, sizeof(logits_size)); out += sizeof(logits_size);
+ data_ctx->write(&logits_cap, sizeof(logits_cap));
+ data_ctx->write(&logits_size, sizeof(logits_size));
if (logits_size) {
- memcpy(out, ctx->logits.data(), logits_size * sizeof(float));
+ data_ctx->write(ctx->logits.data(), logits_size * sizeof(float));
}
- out += logits_cap * sizeof(float);
+ // If there is a gap between the size and the capacity, write padding
+ size_t padding_size = (logits_cap - logits_size) * sizeof(float);
+ if (padding_size > 0) {
+ std::vector<uint8_t> padding(padding_size, 0); // Create a buffer filled with zeros
+ data_ctx->write(padding.data(), padding_size);
+ }
}
// copy embeddings
{
const size_t embedding_size = ctx->embedding.size();
- memcpy(out, &embedding_size, sizeof(embedding_size)); out += sizeof(embedding_size);
+ data_ctx->write(&embedding_size, sizeof(embedding_size));
if (embedding_size) {
- memcpy(out, ctx->embedding.data(), embedding_size * sizeof(float));
- out += embedding_size * sizeof(float);
+ data_ctx->write(ctx->embedding.data(), embedding_size * sizeof(float));
}
}
// copy kv cache
{
- const auto & kv_self = ctx->model.kv_self;
+ const auto & kv_self = ctx->kv_self;
const auto & hparams = ctx->model.hparams;
const int n_layer = hparams.n_layer;
- const int n_embd = hparams.n_embd;
+ const int n_embd = hparams.n_embd_gqa();
const int n_ctx = hparams.n_ctx;
const size_t kv_size = kv_self.buf.size;
const int kv_ntok = llama_get_kv_cache_token_count(ctx);
- memcpy(out, &kv_size, sizeof(kv_size)); out += sizeof(kv_size);
- memcpy(out, &kv_ntok, sizeof(kv_ntok)); out += sizeof(kv_ntok);
+ data_ctx->write(&kv_size, sizeof(kv_size));
+ data_ctx->write(&kv_ntok, sizeof(kv_ntok));
if (kv_size) {
const size_t elt_size = ggml_element_size(kv_self.k);
- char buffer[4096];
-
- ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
+ ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
ggml_cgraph gf{};
ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
- kout3d->data = out;
- out += ggml_nbytes(kout3d);
+ std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0);
+ kout3d->data = kout3d_data.data();
ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
- vout3d->data = out;
- out += ggml_nbytes(vout3d);
+ std::vector<uint8_t> vout3d_data(ggml_nbytes(vout3d), 0);
+ vout3d->data = vout3d_data.data();
ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
n_embd, kv_ntok, n_layer,
ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, k3d, kout3d));
ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, v3d, vout3d));
- ggml_graph_compute_with_ctx(cpy_ctx, &gf, 1);
+ ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);
ggml_free(cpy_ctx);
+
+        // the copied tensors now live in the kout3d_data and vout3d_data buffers;
+        // write them out through the data context (buffer or file)
+ data_ctx->write(kout3d_data.data(), kout3d_data.size());
+ data_ctx->write(vout3d_data.data(), vout3d_data.size());
}
}
+}
- const size_t written = out - dst;
- const size_t max_size = llama_get_state_size(ctx);
-
- LLAMA_ASSERT(written <= max_size);
+size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
+ llama_data_buffer_context data_ctx(dst);
+ llama_copy_state_data_internal(ctx, &data_ctx);
- return written;
+ return data_ctx.get_size_written();
}
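+
+// a minimal usage sketch of the buffer variant:
+//   std::vector<uint8_t> state(llama_get_state_size(ctx));
+//   const size_t n_written = llama_copy_state_data(ctx, state.data());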
// Sets the state reading from the specified source address
rng_ss.str(std::string(&rng_buf[0], rng_size));
rng_ss >> ctx->rng;
- LLAMA_ASSERT(rng_ss.fail() == false);
+ GGML_ASSERT(!rng_ss.fail());
}
// set logits
memcpy(&logits_cap, inp, sizeof(logits_cap)); inp += sizeof(logits_cap);
memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);
- LLAMA_ASSERT(ctx->logits.capacity() == logits_cap);
+ GGML_ASSERT(ctx->logits.capacity() == logits_cap);
if (logits_size) {
ctx->logits.resize(logits_size);
memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size);
- LLAMA_ASSERT(ctx->embedding.capacity() == embedding_size);
+ GGML_ASSERT(ctx->embedding.capacity() == embedding_size);
if (embedding_size) {
memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float));
// set kv cache
{
- const auto & kv_self = ctx->model.kv_self;
+ const auto & kv_self = ctx->kv_self;
const auto & hparams = ctx->model.hparams;
const int n_layer = hparams.n_layer;
- const int n_embd = hparams.n_embd;
+ const int n_embd = hparams.n_embd_gqa();
const int n_ctx = hparams.n_ctx;
size_t kv_size;
memcpy(&kv_ntok, inp, sizeof(kv_ntok)); inp += sizeof(kv_ntok);
if (kv_size) {
- LLAMA_ASSERT(kv_self.buf.size == kv_size);
+ GGML_ASSERT(kv_self.buf.size == kv_size);
const size_t elt_size = ggml_element_size(kv_self.k);
- char buffer[4096];
-
- ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
+ ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
ggml_cgraph gf{};
ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, kin3d, k3d));
ggml_build_forward_expand(&gf, ggml_cpy(cpy_ctx, vin3d, v3d));
- ggml_graph_compute_with_ctx(cpy_ctx, &gf, 1);
+ ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);
ggml_free(cpy_ctx);
}
- ctx->model.kv_self.n = kv_ntok;
+ ctx->kv_self.n = kv_ntok;
}
const size_t nread = inp - src;
const size_t max_size = llama_get_state_size(ctx);
- LLAMA_ASSERT(nread <= max_size);
+ GGML_ASSERT(nread <= max_size);
return nread;
}
-bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
+static bool llama_load_session_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
llama_file file(path_session, "rb");
// sanity checks
const uint32_t version = file.read_u32();
if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
- fprintf(stderr, "%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
+ LLAMA_LOG_ERROR("%s : unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
return false;
}
file.read_raw(&session_hparams, sizeof(llama_hparams));
if (session_hparams != ctx->model.hparams) {
- fprintf(stderr, "%s : model hparams didn't match from session file!\n", __func__);
+        LLAMA_LOG_ERROR("%s : model hparams didn't match from session file!\n", __func__);
return false;
}
}
const uint32_t n_token_count = file.read_u32();
if (n_token_count > n_token_capacity) {
- fprintf(stderr, "%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
+ LLAMA_LOG_ERROR("%s : token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
return false;
}
const size_t n_state_size_max = llama_get_state_size(ctx);
if (n_state_size_cur > n_state_size_max) {
- fprintf(stderr, "%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
+ LLAMA_LOG_ERROR("%s : the state size in session file is too big! max %zu, got %zu\n", __func__, n_state_size_max, n_state_size_cur);
return false;
}
return true;
}
+bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
+ try {
+ return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
+ } catch (const std::exception & err) {
+ LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
+ return false;
+ }
+}
+
bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
llama_file file(path_session, "wb");
file.write_u32((uint32_t) n_token_count);
file.write_raw(tokens, sizeof(llama_token) * n_token_count);
- // save the context state
- {
- const size_t n_state_size_max = llama_get_state_size(ctx);
-
- std::vector<uint8_t> state_data(n_state_size_max);
- const size_t n_state_size_cur = llama_copy_state_data(ctx, state_data.data());
-
- file.write_raw(state_data.data(), n_state_size_cur);
- }
+ // save the context state using stream saving
+ llama_data_file_context data_ctx(&file);
+ llama_copy_state_data_internal(ctx, &data_ctx);
return true;
}
int n_tokens,
int n_past,
int n_threads) {
- if (!llama_eval_internal(*ctx, tokens, n_tokens, n_past, n_threads)) {
- fprintf(stderr, "%s: failed to eval\n", __func__);
+ if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
+ LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
return 1;
}
return 0;
}
-int llama_tokenize(
- struct llama_context * ctx,
- const char * text,
- llama_token * tokens,
- int n_max_tokens,
- bool add_bos) {
- auto res = llama_tokenize(ctx->vocab, text, add_bos);
-
- if (n_max_tokens < (int) res.size()) {
- fprintf(stderr, "%s: too many tokens\n", __func__);
- return -((int) res.size());
+int llama_eval_embd(
+ struct llama_context * ctx,
+ const float * embd,
+ int n_tokens,
+ int n_past,
+ int n_threads) {
+ if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
+ LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
+ return 1;
}
- for (size_t i = 0; i < res.size(); i++) {
- tokens[i] = res[i];
+ // get a more accurate load time, upon first eval
+ // TODO: fix this
+ if (!ctx->has_evaluated_once) {
+ ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
+ ctx->has_evaluated_once = true;
}
- return res.size();
+ return 0;
}
-int llama_n_vocab(const struct llama_context * ctx) {
- return ctx->vocab.id_to_token.size();
-}
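+// evaluates a single BOS token at the end of a 512-token context and exports the resulting
+// computation graph to fname (intended for offline inspection and testing)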
+int llama_eval_export(struct llama_context * ctx, const char * fname) {
+ const int n_batch = 1;
+ const int n_ctx = 512 - n_batch;
-int llama_n_ctx(const struct llama_context * ctx) {
- return ctx->model.hparams.n_ctx;
-}
+ const std::vector<llama_token> tmp(n_batch, llama_token_bos(ctx));
-int llama_n_embd(const struct llama_context * ctx) {
- return ctx->model.hparams.n_embd;
+ if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
+ LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
+ return 1;
+ }
+
+ return 0;
}
float * llama_get_logits(struct llama_context * ctx) {
return ctx->embedding.data();
}
-const char * llama_token_to_str(const struct llama_context * ctx, llama_token token) {
- if (token >= llama_n_vocab(ctx)) {
- return nullptr;
- }
+const char * llama_token_get_text(const struct llama_context * ctx, llama_token token) {
+ return ctx->model.vocab.id_to_token[token].text.c_str();
+}
- return ctx->vocab.id_to_token[token].tok.c_str();
+float llama_token_get_score(const struct llama_context * ctx, llama_token token) {
+ return ctx->model.vocab.id_to_token[token].score;
}
-llama_token llama_token_bos() {
- return 1;
+llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token) {
+ return ctx->model.vocab.id_to_token[token].type;
}
-llama_token llama_token_eos() {
- return 2;
+llama_token llama_token_bos(const struct llama_context * ctx) {
+ return ctx->model.vocab.special_bos_id;
}
-llama_token llama_token_nl() {
- return 13;
+llama_token llama_token_eos(const struct llama_context * ctx) {
+ return ctx->model.vocab.special_eos_id;
}
+llama_token llama_token_nl(const struct llama_context * ctx) {
+ return ctx->model.vocab.linefeed_id;
+}
-void llama_print_timings(struct llama_context * ctx) {
- const int64_t t_end_us = ggml_time_us();
+int llama_tokenize(
+ struct llama_context * ctx,
+ const char * text,
+ llama_token * tokens,
+ int n_max_tokens,
+ bool add_bos) {
+ return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos);
+}
+
+int llama_tokenize_with_model(
+ const struct llama_model * model,
+ const char * text,
+ llama_token * tokens,
+ int n_max_tokens,
+ bool add_bos) {
+ auto res = llama_tokenize_internal(model->vocab, text, add_bos);
+
+ if (n_max_tokens < (int) res.size()) {
+ // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
+ return -((int) res.size());
+ }
+
+ for (size_t i = 0; i < res.size(); i++) {
+ tokens[i] = res[i];
+ }
+
+ return res.size();
+}
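+
+// a minimal two-pass usage sketch of the negative-return convention:
+//   int n = llama_tokenize(ctx, text, nullptr, 0, true);      // negative result = required token count
+//   std::vector<llama_token> tokens(n < 0 ? -n : n);
+//   llama_tokenize(ctx, text, tokens.data(), (int) tokens.size(), true);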
+
+int llama_token_to_piece(const struct llama_context * ctx, llama_token token, char * buf, int length) {
+ return llama_token_to_piece_with_model(&ctx->model, token, buf, length);
+}
+
+// does not write a null terminator to buf
+int llama_token_to_piece_with_model(const struct llama_model * model, llama_token token, char * buf, int length) {
+ if (0 <= token && token < llama_model_n_vocab(model)) {
+ if (llama_is_normal_token(model->vocab, token)) {
+ std::string result = model->vocab.id_to_token[token].text;
+ if (llama_vocab_get_type(model->vocab) == LLAMA_VOCAB_TYPE_SPM) {
+ llama_unescape_whitespace(result);
+ }
+ if (length < (int) result.length()) {
+ return -result.length();
+ }
+ memcpy(buf, result.c_str(), result.length());
+ return result.length();
+ } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT
+ if (length < 3) {
+ return -3;
+ }
+ buf[0] = '\xe2';
+ buf[1] = '\x96';
+ buf[2] = '\x85';
+ return 3;
+ } else if (llama_is_control_token(model->vocab, token)) {
+ ;
+ } else if (llama_is_byte_token(model->vocab, token)) {
+ if (length < 1) {
+ return -1;
+ }
+ buf[0] = llama_token_to_byte(model->vocab, token);
+ return 1;
+ }
+ }
+ return 0;
+}
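+
+// a minimal sketch of converting a token to a std::string using the same negative-return convention:
+//   std::string piece(8, '\0');
+//   int n = llama_token_to_piece(ctx, token, &piece[0], (int) piece.size());
+//   if (n < 0) { piece.resize(-n); n = llama_token_to_piece(ctx, token, &piece[0], (int) piece.size()); }
+//   piece.resize(n);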
+
+struct llama_timings llama_get_timings(struct llama_context * ctx) {
+ struct llama_timings result = {
+ /*.t_start_ms =*/ 1e-3 * ctx->t_start_us,
+ /*.t_end_ms =*/ 1.00 * ggml_time_ms(),
+ /*.t_load_ms =*/ 1e-3 * ctx->t_load_us,
+ /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
+ /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
+ /*.t_eval_ms =*/ 1e-3 * ctx->t_eval_us,
+
+ /*.n_sample =*/ std::max(1, ctx->n_sample),
+ /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
+ /*.n_eval =*/ std::max(1, ctx->n_eval),
+ };
- const int32_t n_sample = std::max(1, ctx->n_sample);
- const int32_t n_eval = std::max(1, ctx->n_eval);
- const int32_t n_p_eval = std::max(1, ctx->n_p_eval);
+ return result;
+}
- fprintf(stderr, "\n");
- fprintf(stderr, "%s: load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0);
- fprintf(stderr, "%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token)\n", __func__, 1e-3 * ctx->t_sample_us, n_sample, 1e-3 * ctx->t_sample_us / n_sample);
- fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token)\n", __func__, 1e-3 * ctx->t_p_eval_us, n_p_eval, 1e-3 * ctx->t_p_eval_us / n_p_eval);
- fprintf(stderr, "%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token)\n", __func__, 1e-3 * ctx->t_eval_us, n_eval, 1e-3 * ctx->t_eval_us / n_eval);
- fprintf(stderr, "%s: total time = %8.2f ms\n", __func__, (t_end_us - ctx->t_start_us)/1000.0);
+void llama_print_timings(struct llama_context * ctx) {
+ const llama_timings timings = llama_get_timings(ctx);
+
+ LLAMA_LOG_INFO("\n");
+ LLAMA_LOG_INFO("%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
+ LLAMA_LOG_INFO("%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+ __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
+ LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
+ __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
+ LLAMA_LOG_INFO("%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+ __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
+ LLAMA_LOG_INFO("%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
}
void llama_reset_timings(struct llama_context * ctx) {
s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | ";
s += "BLAS = " + std::to_string(ggml_cpu_has_blas()) + " | ";
s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | ";
+ s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | ";
s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
return s.c_str();
}
+void llama_dump_timing_info_yaml(FILE * stream, const llama_context * ctx) {
+ fprintf(stream, "\n");
+ fprintf(stream, "###########\n");
+ fprintf(stream, "# Timings #\n");
+ fprintf(stream, "###########\n");
+ fprintf(stream, "\n");
+
+ fprintf(stream, "mst_eval: %.2f # ms / token during generation\n",
+ 1.0e-3 * ctx->t_eval_us / ctx->n_eval);
+ fprintf(stream, "mst_p_eval: %.2f # ms / token during prompt processing\n",
+ 1.0e-3 * ctx->t_p_eval_us / ctx->n_p_eval);
+ fprintf(stream, "mst_sample: %.2f # ms / token during sampling\n",
+ 1.0e-3 * ctx->t_sample_us / ctx->n_sample);
+ fprintf(stream, "n_eval: %d # number of tokens generated (excluding the first one)\n", ctx->n_eval);
+ fprintf(stream, "n_p_eval: %d # number of tokens processed in batches at the beginning\n", ctx->n_p_eval);
+ fprintf(stream, "n_sample: %d # number of sampled tokens\n", ctx->n_sample);
+ fprintf(stream, "t_eval_us: %" PRId64 " # total microseconds spent generating tokens\n", ctx->t_eval_us);
+ fprintf(stream, "t_load_us: %" PRId64 " # total microseconds spent loading the model\n", ctx->t_load_us);
+ fprintf(stream, "t_p_eval_us: %" PRId64 " # total microseconds spent prompt processing\n", ctx->t_p_eval_us);
+ fprintf(stream, "t_sample_us: %" PRId64 " # total microseconds spent sampling\n", ctx->t_sample_us);
+ fprintf(stream, "ts_eval: %.2f # tokens / second during generation\n",
+ 1.0e6 * ctx->n_eval / ctx->t_eval_us);
+ fprintf(stream, "ts_p_eval: %.2f # tokens / second during prompt processing\n",
+ 1.0e6 * ctx->n_p_eval / ctx->t_p_eval_us);
+ fprintf(stream, "ts_sample: %.2f # tokens / second during sampling\n",
+ 1.0e6 * ctx->n_sample / ctx->t_sample_us);
+}
+
// For internal test use
-std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) {
+const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) {
return ctx->model.tensors_by_name;
}
+
+void llama_log_set(llama_log_callback log_callback, void * user_data) {
+ g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
+ g_state.log_callback_user_data = user_data;
+}
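+
+// a minimal sketch of installing a custom callback (the callback name is illustrative):
+//   static void my_log(llama_log_level level, const char * text, void * user_data) {
+//       (void) level; (void) user_data;
+//       fputs(text, stdout);
+//   }
+//   llama_log_set(my_log, nullptr);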
+
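+// format into a small stack buffer first and fall back to a heap allocation when the message does not fit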
+static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) {
+ va_list args_copy;
+ va_copy(args_copy, args);
+ char buffer[128];
+ int len = vsnprintf(buffer, 128, format, args);
+ if (len < 128) {
+ g_state.log_callback(level, buffer, g_state.log_callback_user_data);
+ } else {
+ char* buffer2 = new char[len+1];
+ vsnprintf(buffer2, len+1, format, args_copy);
+ buffer2[len] = 0;
+ g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
+ delete[] buffer2;
+ }
+ va_end(args_copy);
+}
+
+static void llama_log_internal(llama_log_level level, const char * format, ...) {
+ va_list args;
+ va_start(args, format);
+ llama_log_internal_v(level, format, args);
+ va_end(args);
+}
+
+static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) {
+ (void) level;
+ (void) user_data;
+ fputs(text, stderr);
+ fflush(stderr);
+}