#define LLAMA_API_INTERNAL
+//#define LLAMA_GGML_BACKEND_CUDA_TEST // for testing only - enables ggml-cuda through ggml-backend, disables partial offloading
#include "llama.h"
#include "unicode.h"
#include "ggml.h"
-
#include "ggml-alloc.h"
+#include "ggml-backend.h"
#ifdef GGML_USE_CUBLAS
# include "ggml-cuda.h"
#include <unistd.h>
#if defined(_POSIX_MAPPED_FILES)
#include <sys/mman.h>
+ #include <fcntl.h>
#endif
#if defined(_POSIX_MEMLOCK_RANGE)
#include <sys/resource.h>
#endif
#include <windows.h>
#include <io.h>
- #include <stdio.h> // for _fseeki64
#endif
#include <algorithm>
#include <set>
#include <sstream>
#include <thread>
+#include <type_traits>
#include <unordered_map>
#if defined(_MSC_VER)
#define LLAMA_ATTRIBUTE_FORMAT(...)
#endif
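+// hard limits: maximum number of nodes in a compute graph and maximum number of MoE experts per layer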
+#define LLAMA_MAX_NODES 8192
+#define LLAMA_MAX_EXPERTS 8
+
//
// logging
//
LLM_ARCH_PERSIMMON,
LLM_ARCH_REFACT,
LLM_ARCH_BLOOM,
+ LLM_ARCH_STABLELM,
+ LLM_ARCH_QWEN,
+ LLM_ARCH_PHI2,
LLM_ARCH_UNKNOWN,
};
{ LLM_ARCH_PERSIMMON, "persimmon" },
{ LLM_ARCH_REFACT, "refact" },
{ LLM_ARCH_BLOOM, "bloom" },
+ { LLM_ARCH_STABLELM, "stablelm" },
+ { LLM_ARCH_QWEN, "qwen" },
+ { LLM_ARCH_PHI2, "phi2" },
};
enum llm_kv {
LLM_KV_FEED_FORWARD_LENGTH,
LLM_KV_USE_PARALLEL_RESIDUAL,
LLM_KV_TENSOR_DATA_LAYOUT,
+ LLM_KV_EXPERT_COUNT,
+ LLM_KV_EXPERT_USED_COUNT,
LLM_KV_ATTENTION_HEAD_COUNT,
LLM_KV_ATTENTION_HEAD_COUNT_KV,
LLM_KV_TOKENIZER_UNK_ID,
LLM_KV_TOKENIZER_SEP_ID,
LLM_KV_TOKENIZER_PAD_ID,
+ LLM_KV_TOKENIZER_ADD_BOS,
+ LLM_KV_TOKENIZER_ADD_EOS,
LLM_KV_TOKENIZER_HF_JSON,
LLM_KV_TOKENIZER_RWKV,
};
{ LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" },
{ LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" },
{ LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" },
+ { LLM_KV_EXPERT_COUNT, "%s.expert_count" },
+ { LLM_KV_EXPERT_USED_COUNT, "%s.expert_used_count" },
{ LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" },
{ LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" },
{ LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" },
{ LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" },
{ LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" },
+ { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" },
+ { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" },
{ LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" },
{ LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" },
};
LLM_TENSOR_ATTN_NORM,
LLM_TENSOR_ATTN_NORM_2,
LLM_TENSOR_ATTN_ROT_EMBD,
+ LLM_TENSOR_FFN_GATE_INP,
+ LLM_TENSOR_FFN_NORM,
LLM_TENSOR_FFN_GATE,
LLM_TENSOR_FFN_DOWN,
LLM_TENSOR_FFN_UP,
- LLM_TENSOR_FFN_NORM,
+ LLM_TENSOR_FFN_DOWN_EXP,
+ LLM_TENSOR_FFN_GATE_EXP,
+ LLM_TENSOR_FFN_UP_EXP,
LLM_TENSOR_ATTN_Q_NORM,
LLM_TENSOR_ATTN_K_NORM,
};
{ LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
{ LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
{ LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" },
+ { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" },
{ LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
{ LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
{ LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" },
+ { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" },
+ { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" },
},
},
{
{ LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
},
},
+ {
+ LLM_ARCH_STABLELM,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
+ {
+ LLM_ARCH_QWEN,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ROPE_FREQS, "rope_freqs" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
+ {
+ LLM_ARCH_PHI2,
+ {
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
+ { LLM_TENSOR_OUTPUT, "output" },
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
+ { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" },
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+ },
+ },
+
{
LLM_ARCH_UNKNOWN,
{
std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid) + "." + suffix;
}
+
+ std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
+ return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid, xid) + "." + suffix;
+ }
};
//
// gguf helpers
//
-#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
-do { \
- const std::string skey(key); \
- const int kid = gguf_find_key(ctx, skey.c_str()); \
- if (kid >= 0) { \
- enum gguf_type ktype = gguf_get_kv_type(ctx, kid); \
- if (ktype != (type)) { \
- throw std::runtime_error(format("key %s has wrong type: %s", skey.c_str(), gguf_type_name(ktype))); \
- } \
- (dst) = func(ctx, kid); \
- } else if (req) { \
- throw std::runtime_error(format("key not found in model: %s", skey.c_str())); \
- } \
-} while (0)
-
static std::map<int8_t, std::string> LLAMA_ROPE_SCALING_TYPES = {
{ LLAMA_ROPE_SCALING_NONE, "none" },
{ LLAMA_ROPE_SCALING_LINEAR, "linear" },
return LLAMA_ROPE_SCALING_UNSPECIFIED;
}
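+// convert the i-th element of a scalar GGUF value (or array data) to its string representation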
+static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
+ switch (type) {
+ case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]);
+ case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]);
+ case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]);
+ case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]);
+ case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]);
+ case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]);
+ case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]);
+ case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]);
+ case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]);
+ case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]);
+ case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false";
+ default: return format("unknown type %d", type);
+ }
+}
+
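+// render a GGUF key-value pair (including arrays) as a human-readable string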
+static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
+ const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
+
+ switch (type) {
+ case GGUF_TYPE_STRING:
+ return gguf_get_val_str(ctx_gguf, i);
+ case GGUF_TYPE_ARRAY:
+ {
+ const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
+ int arr_n = gguf_get_arr_n(ctx_gguf, i);
+ const void * data = gguf_get_arr_data(ctx_gguf, i);
+ std::stringstream ss;
+ ss << "[";
+ for (int j = 0; j < arr_n; j++) {
+ if (arr_type == GGUF_TYPE_STRING) {
+ std::string val = gguf_get_arr_str(ctx_gguf, i, j);
+ // escape quotes
+ replace_all(val, "\\", "\\\\");
+ replace_all(val, "\"", "\\\"");
+ ss << '"' << val << '"';
+ } else if (arr_type == GGUF_TYPE_ARRAY) {
+ ss << "???";
+ } else {
+ ss << gguf_data_to_str(arr_type, data, j);
+ }
+ if (j < arr_n - 1) {
+ ss << ", ";
+ }
+ }
+ ss << "]";
+ return ss.str();
+ }
+ default:
+ return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
+ }
+}
+
//
// ggml helpers
//
// llama helpers
//
-#ifdef GGML_USE_CUBLAS
-# define llama_host_malloc(n) ggml_cuda_host_malloc(n)
-# define llama_host_free(data) ggml_cuda_host_free(data)
-#elif GGML_USE_METAL
-# define llama_host_malloc(n) ggml_metal_host_malloc(n)
-# define llama_host_free(data) ggml_metal_host_free(data)
-#elif GGML_USE_CPU_HBM
-# define llama_host_malloc(n) hbw_malloc(n)
-# define llama_host_free(data) if (data != NULL) hbw_free(data)
-#else
-# define llama_host_malloc(n) malloc(n)
-# define llama_host_free(data) free(data)
-#endif
-
#if defined(_WIN32)
static std::string llama_format_win_err(DWORD err) {
LPSTR buf;
}
#endif
-struct llama_buffer {
- void * data = NULL;
- size_t size = 0;
-
- // fallback to malloc / free
- // useful in cases where CUDA can try to allocate PINNED memory
- bool fallback = false;
-
- void resize(size_t n) {
- llama_host_free(data);
-
- data = llama_host_malloc(n);
- if (!data) {
- fallback = true;
- data = malloc(n);
- } else {
- fallback = false;
- }
-
- GGML_ASSERT(data);
- size = n;
- }
-
- ~llama_buffer() {
- if (data) {
- if (fallback) { // NOLINT
- free(data);
- } else {
- llama_host_free(data);
- }
- }
-
- data = NULL;
- }
+template <typename T>
+struct no_init {
+ T value;
+ no_init() { /* do nothing */ }
};
struct llama_file {
#ifdef _POSIX_MAPPED_FILES
static constexpr bool SUPPORTED = true;
+ // list of mapped fragments (first_offset, last_offset)
+ std::vector<std::pair<size_t, size_t>> mapped_fragments;
+
llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
size = file->size;
int fd = fileno(file->fp);
// prefetch/readahead impairs performance on NUMA systems
if (numa) { prefetch = 0; }
#ifdef __linux__
+ // advise the kernel to read the file sequentially (increases readahead)
+ if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {
+ LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n",
+ strerror(errno));
+ }
if (prefetch) { flags |= MAP_POPULATE; }
#endif
addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
- if (addr == MAP_FAILED) {
+ if (addr == MAP_FAILED) { // NOLINT
throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
}
if (prefetch > 0) {
- // Advise the kernel to preload the mapped memory
+ // advise the kernel to preload the mapped memory
if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
- fprintf(stderr, "warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
+ LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
strerror(errno));
}
}
// advise the kernel not to use readahead
// (because the next page might not belong on the same node)
if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
- fprintf(stderr, "warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
+ LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
strerror(errno));
}
}
+
+ // initialize list of mapped_fragments
+ mapped_fragments.emplace_back(0, file->size);
+ }
+
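+ // shrink [*first, *last) to page boundaries: round first up and last down (the range may collapse to empty)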
+ static void align_range(size_t * first, size_t * last, size_t page_size) {
+ // align first to the next page
+ size_t offset_in_page = *first & (page_size - 1);
+ size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page;
+ *first += offset_to_page;
+
+ // align last to the previous page
+ *last = *last & ~(page_size - 1);
+
+ if (*last <= *first) {
+ *last = *first;
+ }
+ }
+
+ // partially unmap the file in the range [first, last)
+ void unmap_fragment(size_t first, size_t last) {
+ // note: this function must not be called multiple times with overlapping ranges
+ // otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings
+ int page_size = sysconf(_SC_PAGESIZE);
+ align_range(&first, &last, page_size);
+ size_t len = last - first;
+
+ if (len == 0) {
+ return;
+ }
+
+ GGML_ASSERT(first % page_size == 0);
+ GGML_ASSERT(last % page_size == 0);
+ GGML_ASSERT(last > first);
+
+ void * next_page_start = (uint8_t *) addr + first;
+
+ // unmap the range
+ if (munmap(next_page_start, len)) {
+ LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
+ }
+
+ // update the list of mapped fragments to avoid unmapping the same range again in the destructor
+ std::vector<std::pair<size_t, size_t>> new_mapped_fragments;
+ for (const auto & frag : mapped_fragments) {
+ if (frag.first < first && frag.second > last) {
+ // the range is in the middle of the fragment, split it
+ new_mapped_fragments.emplace_back(frag.first, first);
+ new_mapped_fragments.emplace_back(last, frag.second);
+ } else if (frag.first < first && frag.second > first) {
+ // the range starts in the middle of the fragment
+ new_mapped_fragments.emplace_back(frag.first, first);
+ } else if (frag.first < last && frag.second > last) {
+ // the range ends in the middle of the fragment
+ new_mapped_fragments.emplace_back(last, frag.second);
+ } else if (frag.first >= first && frag.second <= last) {
+ // the range covers the entire fragment
+ } else {
+ // the range is outside the fragment
+ new_mapped_fragments.push_back(frag);
+ }
+ }
+ mapped_fragments = std::move(new_mapped_fragments);
}
~llama_mmap() {
- munmap(addr, size);
+ for (const auto & frag : mapped_fragments) {
+ if (munmap((char *) addr + frag.first, frag.second - frag.first)) {
+ LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
+ }
+ }
}
#elif defined(_WIN32)
static constexpr bool SUPPORTED = true;
}
}
+ void unmap_fragment(size_t first, size_t last) {
+ // not supported
+ GGML_UNUSED(first);
+ GGML_UNUSED(last);
+ }
+
~llama_mmap() {
if (!UnmapViewOfFile(addr)) {
fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
throw std::runtime_error(std::string("mmap not supported"));
}
+
+ void unmap_fragment(size_t first, size_t last) {
+ (void) first;
+ (void) last;
+
+ throw std::runtime_error(std::string("mmap not supported"));
+ }
#endif
};
return std::string(result.data(), result.size());
}
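+// pick the default ggml-backend buffer type based on the enabled backends and the number of offloaded layers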
+static ggml_backend_buffer_type_t llama_default_buffer_type(int n_gpu_layers) {
+#ifdef GGML_USE_METAL
+ if (n_gpu_layers > 0) {
+ return ggml_backend_metal_buffer_type();
+ }
+#elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ if (n_gpu_layers > 0) {
+ return ggml_backend_cuda_buffer_type(0);
+ }
+#elif defined(GGML_USE_CUBLAS)
+ return ggml_backend_cuda_host_buffer_type();
+#elif defined(GGML_USE_CPU_HBM)
+ return ggml_backend_cpu_hbm_buffer_type();
+#endif
+
+ return ggml_backend_cpu_buffer_type();
+
+ GGML_UNUSED(n_gpu_layers);
+}
+
//
// globals
//
struct llama_state {
+ llama_state() {
+#ifdef GGML_USE_METAL
+ ggml_metal_log_set_callback(log_callback, log_callback_user_data);
+#endif
+ }
+
// We save the log callback globally
ggml_log_callback log_callback = llama_log_callback_default;
void * log_callback_user_data = nullptr;
MODEL_70B,
};
-static const size_t kB = 1024;
-static const size_t MB = 1024*kB;
-static const size_t GB = 1024*MB;
+static const size_t kiB = 1024;
+static const size_t MiB = 1024*kiB;
+static const size_t GiB = 1024*MiB;
struct llama_hparams {
bool vocab_only;
uint32_t n_layer;
uint32_t n_rot;
uint32_t n_ff;
+ uint32_t n_expert = 0;
+ uint32_t n_expert_used = 0;
float f_norm_eps;
float f_norm_rms_eps;
float f_max_alibi_bias;
bool operator!=(const llama_hparams & other) const {
- if (this->vocab_only != other.vocab_only) return true;
- if (this->n_vocab != other.n_vocab) return true;
- if (this->n_ctx_train != other.n_ctx_train) return true;
- if (this->n_embd != other.n_embd) return true;
- if (this->n_head != other.n_head) return true;
- if (this->n_head_kv != other.n_head_kv) return true;
- if (this->n_layer != other.n_layer) return true;
- if (this->n_rot != other.n_rot) return true;
- if (this->n_ff != other.n_ff) return true;
+ if (this->vocab_only != other.vocab_only) return true;
+ if (this->n_vocab != other.n_vocab) return true;
+ if (this->n_ctx_train != other.n_ctx_train) return true;
+ if (this->n_embd != other.n_embd) return true;
+ if (this->n_head != other.n_head) return true;
+ if (this->n_head_kv != other.n_head_kv) return true;
+ if (this->n_layer != other.n_layer) return true;
+ if (this->n_rot != other.n_rot) return true;
+ if (this->n_ff != other.n_ff) return true;
+ if (this->n_expert != other.n_expert) return true;
+ if (this->n_expert_used != other.n_expert_used) return true;
+
if (this->rope_finetuned != other.rope_finetuned) return true;
if (this->n_yarn_orig_ctx != other.n_yarn_orig_ctx) return true;
float yarn_beta_slow;
bool mul_mat_q;
+ bool offload_kqv;
};
struct llama_layer {
struct ggml_tensor * wqkv;
// attention bias
+ struct ggml_tensor * bq;
+ struct ggml_tensor * bk;
+ struct ggml_tensor * bv;
struct ggml_tensor * bo;
struct ggml_tensor * bqkv;
struct ggml_tensor * ffn_down; // w2
struct ggml_tensor * ffn_up; // w3
+ // ff MoE
+ struct ggml_tensor * ffn_gate_inp;
+ struct ggml_tensor * ffn_gate_exp[LLAMA_MAX_EXPERTS];
+ struct ggml_tensor * ffn_down_exp[LLAMA_MAX_EXPERTS];
+ struct ggml_tensor * ffn_up_exp [LLAMA_MAX_EXPERTS];
+
// ff bias
struct ggml_tensor * ffn_down_b; // b2
struct ggml_tensor * ffn_up_b; // b3
// cannot be freely changed after a slot has been allocated.
uint32_t head = 0;
uint32_t size = 0;
+ uint32_t used = 0; // used cells (i.e. at least one seq_id)
// computed before each graph build
uint32_t n = 0;
std::vector<llama_kv_cell> cells;
- struct ggml_tensor * k = NULL;
- struct ggml_tensor * v = NULL;
+ std::vector<struct ggml_tensor *> k_l; // per layer
+ std::vector<struct ggml_tensor *> v_l;
struct ggml_context * ctx = NULL;
- llama_buffer buf;
+ ggml_backend_buffer_t buf = NULL;
~llama_kv_cache() {
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ if (ggml_cublas_loaded()) {
+ for (size_t i = 0; i < k_l.size(); ++i) {
+ ggml_cuda_free_data(k_l[i]);
+ ggml_cuda_free_data(v_l[i]);
+ }
+ }
+#endif
if (ctx) {
ggml_free(ctx);
}
-#ifdef GGML_USE_CUBLAS
- ggml_cuda_free_data(k);
- ggml_cuda_free_data(v);
-#endif // GGML_USE_CUBLAS
+ ggml_backend_buffer_free(buf);
}
};
id special_sep_id = -1;
id special_pad_id = -1;
+ int special_add_bos = -1; // -1 unknown, 1 add, 0 don't add.
+ int special_add_eos = -1; // -1 unknown, 1 add, 0 don't add.
+
id linefeed_id = 13;
id special_prefix_id = 32007;
id special_middle_id = 32009;
id special_suffix_id = 32008;
id special_eot_id = 32010;
- int find_bpe_rank(std::string token_left, std::string token_right) const {
- GGML_ASSERT(token_left.find(" ") == std::string::npos);
- GGML_ASSERT(token_left.find("\n") == std::string::npos);
- GGML_ASSERT(token_right.find(" ") == std::string::npos);
- GGML_ASSERT(token_right.find("\n") == std::string::npos);
+ int find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
+ GGML_ASSERT(token_left.find(' ') == std::string::npos);
+ GGML_ASSERT(token_left.find('\n') == std::string::npos);
+ GGML_ASSERT(token_right.find(' ') == std::string::npos);
+ GGML_ASSERT(token_right.find('\n') == std::string::npos);
auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
if (it == bpe_ranks.end()) {
struct ggml_tensor * output_norm;
struct ggml_tensor * output_norm_b;
struct ggml_tensor * output;
+ struct ggml_tensor * output_b;
std::vector<llama_layer> layers;
int n_gpu_layers;
+ // gguf metadata
+ std::unordered_map<std::string, std::string> gguf_kv;
+
// context
struct ggml_context * ctx = NULL;
// the model memory buffer
- llama_buffer buf;
+ ggml_backend_buffer_t buf = NULL;
// model memory mapped file
std::unique_ptr<llama_mmap> mapping;
int64_t t_start_us = 0;
~llama_model() {
- if (ctx) {
- ggml_free(ctx);
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ if (ggml_cublas_loaded()) {
+ for (size_t i = 0; i < tensors_by_name.size(); ++i) {
+ ggml_cuda_free_data(tensors_by_name[i].second);
+ }
+ ggml_cuda_free_scratch();
}
+#endif
-#ifdef GGML_USE_CUBLAS
- for (size_t i = 0; i < tensors_by_name.size(); ++i) {
- ggml_cuda_free_data(tensors_by_name[i].second);
- }
- ggml_cuda_free_scratch();
-#elif defined(GGML_USE_CLBLAST)
+#if defined(GGML_USE_CLBLAST)
for (size_t i = 0; i < tensors_by_name.size(); ++i) {
ggml_cl_free_data(tensors_by_name[i].second);
}
#endif
+ if (ctx) {
+ ggml_free(ctx);
+ }
+
+ ggml_backend_buffer_free(buf);
}
};
struct llama_context {
llama_context(const llama_model & model) : model(model), t_start_us(model.t_start_us), t_load_us(model.t_load_us) {}
~llama_context() {
-#ifdef GGML_USE_METAL
- if (ctx_metal) {
- ggml_metal_free(ctx_metal);
- }
-#endif
- if (alloc) {
- ggml_allocr_free(alloc);
- }
+ ggml_allocr_free(alloc);
+ ggml_backend_buffer_free(buf_alloc);
+ ggml_backend_free(backend);
}
llama_cparams cparams;
+ ggml_backend_t backend = nullptr;
+
const llama_model & model;
// key + value cache for the self attention
// decode output (2-dimensional array: [n_tokens][n_vocab])
std::vector<float> logits;
+#ifndef NDEBUG
+ // guard against access to unset logits
+ std::vector<bool> logits_valid;
+#endif
bool logits_all = false;
// input embedding (1-dimensional array: [n_embd])
std::vector<float> embedding;
- // reusable buffer for `struct ggml_graph_plan.work_data`
- std::vector<uint8_t> work_buffer;
-
// memory buffers used to evaluate the model
- llama_buffer buf_compute;
-
- llama_buffer buf_alloc;
+ std::vector<uint8_t> buf_compute_meta;
+ ggml_backend_buffer_t buf_alloc = NULL;
ggml_allocr * alloc = NULL;
-#ifdef GGML_USE_METAL
- ggml_metal_context * ctx_metal = NULL;
-#endif
+ // temporary buffer for copying data to/from the backend
+ std::vector<no_init<uint8_t>> buf_copy;
#ifdef GGML_USE_MPI
ggml_mpi_context * ctx_mpi = NULL;
static bool llama_kv_cache_init(
const struct llama_hparams & hparams,
struct llama_kv_cache & cache,
- ggml_type wtype,
+ ggml_type ktype,
+ ggml_type vtype,
uint32_t n_ctx,
- int n_gpu_layers) {
+ int n_gpu_layers,
+ bool offload) {
const uint32_t n_embd = hparams.n_embd_gqa();
const uint32_t n_layer = hparams.n_layer;
- const int64_t n_mem = n_layer*n_ctx;
- const int64_t n_elements = n_embd*n_mem;
-
cache.has_shift = false;
cache.head = 0;
cache.size = n_ctx;
+ cache.used = 0;
cache.cells.clear();
cache.cells.resize(n_ctx);
- cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*ggml_tensor_overhead());
- memset(cache.buf.data, 0, cache.buf.size);
-
struct ggml_init_params params;
- params.mem_size = cache.buf.size;
- params.mem_buffer = cache.buf.data;
- params.no_alloc = false;
+ params.mem_size = 2u*n_layer*ggml_tensor_overhead();
+ params.mem_buffer = NULL;
+ params.no_alloc = true;
cache.ctx = ggml_init(params);
+ size_t vram_kv_cache = 0;
+
if (!cache.ctx) {
LLAMA_LOG_ERROR("%s: failed to allocate memory for kv cache\n", __func__);
return false;
}
- cache.k = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
- cache.v = ggml_new_tensor_1d(cache.ctx, wtype, n_elements);
- ggml_set_name(cache.k, "cache_k");
- ggml_set_name(cache.v, "cache_v");
+ cache.k_l.reserve(n_layer);
+ cache.v_l.reserve(n_layer);
+
+ const int i_gpu_start = (int) n_layer - n_gpu_layers;
+
+ for (int i = 0; i < (int) n_layer; i++) {
+ ggml_tensor * k = ggml_new_tensor_1d(cache.ctx, ktype, n_embd*n_ctx);
+ ggml_tensor * v = ggml_new_tensor_1d(cache.ctx, vtype, n_embd*n_ctx);
+ ggml_format_name(k, "cache_k_l%d", i);
+ ggml_format_name(v, "cache_v_l%d", i);
+ cache.k_l.push_back(k);
+ cache.v_l.push_back(v);
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ if (i >= i_gpu_start) {
+ if (offload) {
+ ggml_cuda_assign_buffers_no_scratch(k);
+ ggml_cuda_assign_buffers_no_scratch(v);
+ vram_kv_cache += ggml_nbytes(k);
+ vram_kv_cache += ggml_nbytes(v);
+ // HACK: mark tensor as allocated
+ k->data = v->data = (void *)(uintptr_t)1;
+ }
+ }
+#endif // GGML_USE_CUBLAS
+ }
- (void) n_gpu_layers;
-#ifdef GGML_USE_CUBLAS
- size_t vram_kv_cache = 0;
+ // allocate tensors
+ cache.buf = ggml_backend_alloc_ctx_tensors_from_buft(cache.ctx, llama_default_buffer_type(n_gpu_layers));
- if (n_gpu_layers > (int)n_layer + 1) {
- ggml_cuda_assign_buffers_no_scratch(cache.v);
- LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__);
- vram_kv_cache += ggml_nbytes(cache.v);
- }
- if (n_gpu_layers > (int)n_layer + 2) {
- ggml_cuda_assign_buffers_no_scratch(cache.k);
- LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__);
- vram_kv_cache += ggml_nbytes(cache.k);
+ // buf may be NULL with full offload
+ if (cache.buf) {
+ // initialize the buffer to avoid NaNs in the padding
+ ggml_backend_buffer_clear(cache.buf, 0);
}
+
if (vram_kv_cache > 0) {
LLAMA_LOG_INFO("%s: VRAM kv self = %.2f MB\n", __func__, vram_kv_cache / 1024.0 / 1024.0);
}
-#endif // GGML_USE_CUBLAS
+
+ GGML_UNUSED(i_gpu_start);
+ GGML_UNUSED(offload);
return true;
}
}
}
+ cache.used += n_tokens;
+
return true;
}
cache.cells[i].seq_id.clear();
}
cache.head = 0;
+ cache.used = 0;
}
static void llama_kv_cache_seq_rm(
continue;
}
if (cache.cells[i].seq_id.empty()) {
+ // keep count of the number of used cells
+ if (cache.cells[i].pos >= 0) cache.used--;
+
cache.cells[i].pos = -1;
if (new_head == cache.size) new_head = i;
}
}
// If we freed up a slot, set head to it so searching can start there.
- if (new_head != cache.size) cache.head = new_head;
+ if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
}
static void llama_kv_cache_seq_cp(
for (uint32_t i = 0; i < cache.size; ++i) {
if (!cache.cells[i].has_seq_id(seq_id)) {
+ if (cache.cells[i].pos >= 0) cache.used--;
cache.cells[i].pos = -1;
cache.cells[i].seq_id.clear();
if (new_head == cache.size) new_head = i;
}
// If we freed up a slot, set head to it so searching can start there.
- if (new_head != cache.size) cache.head = new_head;
+ if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
}
static void llama_kv_cache_seq_shift(
cache.cells[i].delta += delta;
if (cache.cells[i].pos < 0) {
+ if (!cache.cells[i].seq_id.empty()) cache.used--;
cache.cells[i].pos = -1;
cache.cells[i].seq_id.clear();
if (new_head == cache.size) new_head = i;
return buf;
}
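+// typed accessors for GGUF metadata key-value pairs, with support for per-key overrides
+// (replaces the old GGUF_GET_KEY macro)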
+namespace GGUFMeta {
+ template <typename T, gguf_type gt_, T (*gfun)(const gguf_context *, const int)>
+ struct GKV_Base_Type {
+ static constexpr gguf_type gt = gt_;
+
+ static T getter(const gguf_context * ctx, const int kid) {
+ return gfun(ctx, kid);
+ }
+ };
+
+ template<typename T> struct GKV_Base;
+
+ template<> struct GKV_Base<bool >: GKV_Base_Type<bool, GGUF_TYPE_BOOL, gguf_get_val_bool> {};
+ template<> struct GKV_Base<uint8_t >: GKV_Base_Type<uint8_t, GGUF_TYPE_UINT8, gguf_get_val_u8 > {};
+ template<> struct GKV_Base<uint16_t >: GKV_Base_Type<uint16_t, GGUF_TYPE_UINT16, gguf_get_val_u16 > {};
+ template<> struct GKV_Base<uint32_t >: GKV_Base_Type<uint32_t, GGUF_TYPE_UINT32, gguf_get_val_u32 > {};
+ template<> struct GKV_Base<uint64_t >: GKV_Base_Type<uint64_t, GGUF_TYPE_UINT64, gguf_get_val_u64 > {};
+ template<> struct GKV_Base<int8_t >: GKV_Base_Type<int8_t, GGUF_TYPE_INT8, gguf_get_val_i8 > {};
+ template<> struct GKV_Base<int16_t >: GKV_Base_Type<int16_t, GGUF_TYPE_INT16, gguf_get_val_i16 > {};
+ template<> struct GKV_Base<int32_t >: GKV_Base_Type<int32_t, GGUF_TYPE_INT32, gguf_get_val_i32 > {};
+ template<> struct GKV_Base<int64_t >: GKV_Base_Type<int64_t, GGUF_TYPE_INT64, gguf_get_val_i64 > {};
+ template<> struct GKV_Base<float >: GKV_Base_Type<float, GGUF_TYPE_FLOAT32, gguf_get_val_f32 > {};
+ template<> struct GKV_Base<double >: GKV_Base_Type<double, GGUF_TYPE_FLOAT64, gguf_get_val_f64 > {};
+ template<> struct GKV_Base<const char *>: GKV_Base_Type<const char *, GGUF_TYPE_STRING, gguf_get_val_str > {};
+
+ template<> struct GKV_Base<std::string> {
+ static constexpr gguf_type gt = GGUF_TYPE_STRING;
+
+ static std::string getter(const gguf_context * ctx, const int kid) {
+ return gguf_get_val_str(ctx, kid);
+ }
+ };
+
+ struct ArrayInfo{
+ const gguf_type gt;
+ const size_t length;
+ const void * data;
+ };
+
+ template<> struct GKV_Base<ArrayInfo> {
+ public:
+ static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
+ static ArrayInfo getter(const gguf_context *ctx, const int k) {
+ return ArrayInfo {
+ gguf_get_arr_type(ctx, k),
+ size_t(gguf_get_arr_n(ctx, k)),
+ gguf_get_arr_data(ctx, k),
+ };
+ }
+ };
+
+ template<typename T>
+ class GKV: public GKV_Base<T> {
+ GKV() = delete;
+
+ public:
+ static T get_kv(const gguf_context * ctx, const int k) {
+ const enum gguf_type kt = gguf_get_kv_type(ctx, k);
+
+ if (kt != GKV::gt) {
+ throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
+ gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt)));
+ }
+ return GKV::getter(ctx, k);
+ }
+
+ static const char * override_type_to_str(const llama_model_kv_override_type ty) {
+ switch (ty) {
+ case LLAMA_KV_OVERRIDE_BOOL: return "bool";
+ case LLAMA_KV_OVERRIDE_INT: return "int";
+ case LLAMA_KV_OVERRIDE_FLOAT: return "float";
+ }
+ return "unknown";
+ }
+
+ static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override *override) {
+ if (!override) { return false; }
+ if (override->tag == expected_type) {
+ LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
+ __func__, override_type_to_str(override->tag), override->key);
+ switch (override->tag) {
+ case LLAMA_KV_OVERRIDE_BOOL: {
+ printf("%s\n", override->bool_value ? "true" : "false");
+ } break;
+ case LLAMA_KV_OVERRIDE_INT: {
+ printf("%" PRId64 "\n", override->int_value);
+ } break;
+ case LLAMA_KV_OVERRIDE_FLOAT: {
+ printf("%.6f\n", override->float_value);
+ } break;
+ default:
+ // Shouldn't be possible to end up here, but just in case...
+ throw std::runtime_error(
+ format("Unsupported attempt to override %s type for metadata key %s\n",
+ override_type_to_str(override->tag), override->key));
+ }
+ return true;
+ }
+ LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
+ __func__, override->key, override_type_to_str(expected_type), override_type_to_str(override->tag));
+ return false;
+ }
+
+ template<typename OT>
+ static typename std::enable_if<std::is_same<OT, bool>::value, bool>::type
+ try_override(OT & target, const struct llama_model_kv_override *override) {
+ if (validate_override(LLAMA_KV_OVERRIDE_BOOL, override)) {
+ target = override->bool_value;
+ return true;
+ }
+ return false;
+ }
+
+ template<typename OT>
+ static typename std::enable_if<!std::is_same<OT, bool>::value && std::is_integral<OT>::value, bool>::type
+ try_override(OT & target, const struct llama_model_kv_override *override) {
+ if (validate_override(LLAMA_KV_OVERRIDE_INT, override)) {
+ target = override->int_value;
+ return true;
+ }
+ return false;
+ }
+
+ template<typename OT>
+ static typename std::enable_if<std::is_floating_point<OT>::value, bool>::type
+ try_override(OT & target, const struct llama_model_kv_override *override) {
+ if (validate_override(LLAMA_KV_OVERRIDE_FLOAT, override)) {
+ target = override->float_value;
+ return true;
+ }
+ return false;
+ }
+
+ template<typename OT>
+ static typename std::enable_if<std::is_same<OT, std::string>::value, bool>::type
+ try_override(OT & target, const struct llama_model_kv_override *override) {
+ (void)target;
+ (void)override;
+ if (!override) { return false; }
+ // Currently, we should never end up here so it would be a bug if we do.
+ throw std::runtime_error(format("Unsupported attempt to override string type for metadata key %s\n",
+ override ? override->key : "NULL"));
+ }
+
+ static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override *override = nullptr) {
+ if (try_override<T>(target, override)) {
+ return true;
+ }
+ if (k < 0) { return false; }
+ target = get_kv(ctx, k);
+ return true;
+ }
+
+ static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override *override = nullptr) {
+ return set(ctx, gguf_find_key(ctx, key), target, override);
+ }
+
+ static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override *override = nullptr) {
+ return set(ctx, key.c_str(), target, override);
+ }
+ };
+}
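+// usage sketch (not part of the loader): read an optional uint32 key with no override;
+// if the key is absent, set() returns false and leaves the target unchanged
+//   uint32_t n_ff = 0;
+//   GGUFMeta::GKV<uint32_t>::set(ctx_gguf, "llama.feed_forward_length", n_ff, /*override =*/ nullptr);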
+
struct llama_model_loader {
int n_kv = 0;
int n_tensors = 0;
llama_fver fver;
std::unique_ptr<llama_mmap> mapping;
+ std::unordered_map<std::string, struct llama_model_kv_override> kv_overrides;
struct gguf_context * ctx_gguf = NULL;
struct ggml_context * ctx_meta = NULL;
- llama_model_loader(const std::string & fname, bool use_mmap) : file(fname.c_str(), "rb") {
+ std::string arch_name;
+ LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
+
+ llama_model_loader(const std::string & fname, bool use_mmap, const struct llama_model_kv_override * param_overrides_p) : file(fname.c_str(), "rb") {
struct gguf_init_params params = {
/*.no_alloc = */ true,
/*.ctx = */ &ctx_meta,
};
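+ // copy caller-provided KV overrides (the array is terminated by an entry with an empty key)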
+ if (param_overrides_p != nullptr) {
+ for (const struct llama_model_kv_override *p = param_overrides_p; p->key[0] != 0; p++) {
+ kv_overrides.insert({std::string(p->key), *p});
+ }
+ }
+
ctx_gguf = gguf_init_from_file(fname.c_str(), params);
if (!ctx_gguf) {
throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
}
+ get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
+ llm_kv = LLM_KV(llm_arch_from_string(arch_name));
+
n_kv = gguf_get_n_kv(ctx_gguf);
n_tensors = gguf_get_n_tensors(ctx_gguf);
enum ggml_type type_max = GGML_TYPE_F32;
for (int i = 0; i < n_tensors; i++) {
- const char * name = gguf_get_tensor_name(ctx_gguf, i);
- struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, name);
+ enum ggml_type type = gguf_get_tensor_type(ctx_gguf, i);
- n_type[meta->type]++;
+ n_type[type]++;
- if (n_type_max < n_type[meta->type]) {
- n_type_max = n_type[meta->type];
- type_max = meta->type;
+ if (n_type_max < n_type[type]) {
+ n_type_max = n_type[type];
+ type_max = type;
}
- LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str());
+ // LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str());
}
switch (type_max) {
case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break;
case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break;
default:
- {
- LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
- ftype = LLAMA_FTYPE_ALL_F32;
- } break;
+ {
+ LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
+ ftype = LLAMA_FTYPE_ALL_F32;
+ } break;
}
// this is a way to mark that we have "guessed" the file type
}
}
+ LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
for (int i = 0; i < n_kv; i++) {
- const char * name = gguf_get_key(ctx_gguf, i);
- const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
+ const char * name = gguf_get_key(ctx_gguf, i);
+ const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
+ const std::string type_name =
+ type == GGUF_TYPE_ARRAY
+ ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(ctx_gguf, i)), gguf_get_arr_n(ctx_gguf, i))
+ : gguf_type_name(type);
+
+ std::string value = gguf_kv_to_str(ctx_gguf, i);
+ const size_t MAX_VALUE_LEN = 40;
+ if (value.size() > MAX_VALUE_LEN) {
+ value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
+ }
+ replace_all(value, "\n", "\\n");
- LLAMA_LOG_INFO("%s: - kv %3d: %42s %-8s\n", __func__, i, name, gguf_type_name(type));
+ LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
}
// print type counts
}
}
- std::string get_arch_name() const {
- const auto kv = LLM_KV(LLM_ARCH_UNKNOWN);
+ template<typename T>
+ typename std::enable_if<std::is_integral<T>::value, bool>::type
+ get_arr_n(const std::string & key, T & result, const bool required = true) {
+ const int kid = gguf_find_key(ctx_gguf, key.c_str());
- std::string arch_name;
- GGUF_GET_KEY(ctx_gguf, arch_name, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_GENERAL_ARCHITECTURE));
+ if (kid < 0) {
+ if (required) {
+ throw std::runtime_error(format("key not found in model: %s", key.c_str()));
+ }
+ return false;
+ }
- return arch_name;
- }
+ struct GGUFMeta::ArrayInfo arr_info =
+ GGUFMeta::GKV<GGUFMeta::ArrayInfo>::get_kv(ctx_gguf, kid);
- enum llm_arch get_arch() const {
- const std::string arch_name = get_arch_name();
- return llm_arch_from_string(arch_name);
+ result = arr_info.length;
+ return true;
}
- const char * get_tensor_name(int i) const {
- return gguf_get_tensor_name(ctx_gguf, i);
+ template<typename T>
+ typename std::enable_if<std::is_integral<T>::value, bool>::type
+ get_arr_n(const enum llm_kv kid, T & result, const bool required = true) {
+ return get_arr_n(llm_kv(kid), result, required);
}
- struct ggml_tensor * get_tensor_meta(int i) const {
- return ggml_get_tensor(ctx_meta, get_tensor_name(i));
- }
+ template<typename T>
+ bool get_key(const std::string & key, T & result, const bool required = true) {
+ auto it = kv_overrides.find(key);
- void calc_sizes(size_t & ctx_size_p, size_t & mmapped_size_p) const {
- ctx_size_p = 0;
- mmapped_size_p = 0;
+ const struct llama_model_kv_override * override =
+ it != kv_overrides.end() ? &it->second : nullptr;
- for (int i = 0; i < n_tensors; i++) {
- struct ggml_tensor * meta = get_tensor_meta(i);
- ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE;
- (use_mmap ? mmapped_size_p : ctx_size_p) += ggml_nbytes_pad(meta);
+ const bool found = GGUFMeta::GKV<T>::set(ctx_gguf, key, result, override);
+
+ if (required && !found) {
+ throw std::runtime_error(format("key not found in model: %s", key.c_str()));
}
+
+ return found;
}
- struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend_type backend) {
- if (backend != GGML_BACKEND_CPU) {
- ggml_set_no_alloc(ctx, true);
- }
+ template<typename T>
+ bool get_key(const enum llm_kv kid, T & result, const bool required = true) {
+ return get_key(llm_kv(kid), result, required);
+ }
+
+ std::string get_arch_name() const {
+ return arch_name;
+ }
+
+ enum llm_arch get_arch() const {
+ return llm_kv.arch;
+ }
+
+ const char * get_tensor_name(int i) const {
+ return gguf_get_tensor_name(ctx_gguf, i);
+ }
+
+ struct ggml_tensor * get_tensor_meta(const char * name) const {
+ return ggml_get_tensor(ctx_meta, name);
+ }
+ struct ggml_tensor * get_tensor_meta(int i) const {
+ return get_tensor_meta(get_tensor_name(i));
+ }
+
+ struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend_type backend) {
struct ggml_tensor * tensor = ggml_dup_tensor(ctx, meta);
tensor->backend = backend; // TODO: ggml_set_backend
ggml_set_name(tensor, ggml_get_name(meta));
- if (backend != GGML_BACKEND_CPU) {
- ggml_set_no_alloc(ctx, use_mmap);
- }
-
n_created++;
return tensor;
}
- struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend) {
+ struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool required = true) {
struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
if (cur == NULL) {
+ if (!required) {
+ return NULL;
+ }
throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
}
return gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, idx);
}
+ void init_mapping(bool prefetch = true) {
+ /*
+ // prefetch only CPU tensors
+ if (use_mmap) {
+ size_t size_pref = 0; // prefetch
+
+ for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
+ struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
+ if (cur->backend == GGML_BACKEND_CPU) {
+ size_t tensor_end = gguf_get_tensor_offset(ctx_gguf, i) + ggml_nbytes(cur);
+ size_pref = std::max(size_pref, tensor_end);
+ }
+ }
+ mapping.reset(new llama_mmap(&file, gguf_get_data_offset(ctx_gguf) + size_pref, ggml_is_numa()));
+ }
+ */
+ // prefetch the whole file - all the data is needed anyway
+ if (use_mmap) {
+ mapping.reset(new llama_mmap(&file, prefetch ? -1 : 0, ggml_is_numa()));
+ }
+ }
+
+ // for backwards compatibility, does not support ggml-backend
void load_data_for(struct ggml_tensor * cur) const {
const size_t offs = file_offset(ggml_get_name(cur));
- if (use_mmap) {
- cur->data = (uint8_t *) mapping->addr + offs;
+ if (use_mmap && mapping) {
+ GGML_ASSERT(cur->data == nullptr);
+ cur->data = (uint8_t *)mapping->addr + offs;
} else {
+ GGML_ASSERT(cur->data != nullptr);
file.seek(offs, SEEK_SET);
file.read_raw(cur->data, ggml_nbytes(cur));
}
}
- void load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) {
+ // Returns false if cancelled by progress_callback
+ bool load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, ggml_backend_buffer_t buf_mmap, llama_mlock * lmlock) const {
size_t size_data = 0;
- size_t size_lock = 0;
- size_t size_pref = 0; // prefetch
for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
size_data += ggml_nbytes(cur);
- if (cur->backend == GGML_BACKEND_CPU) {
- size_pref += ggml_nbytes(cur);
- }
}
- if (use_mmap) {
- mapping.reset(new llama_mmap(&file, size_pref, ggml_is_numa()));
+ if (use_mmap && buf_mmap) {
if (lmlock) {
lmlock->init(mapping->addr);
}
}
- size_t done_size = 0;
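+ // when built with the legacy CUDA/OpenCL offload path, GPU tensors are uploaded directly (bypassing ggml-backend)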
+#if (defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)) || defined(GGML_USE_CLBLAST)
+ const bool legacy_offload = true;
+#else
+ const bool legacy_offload = false;
+#endif
+
+ std::vector<no_init<uint8_t>> read_buf;
+
+ size_t size_done = 0;
+
+ size_t mmap_first = -1; // == SIZE_MAX, narrowed below via std::min
+ size_t mmap_last = 0;
+
for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) {
struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i));
GGML_ASSERT(cur); // unused tensors should have been caught by load_data already
if (progress_callback) {
- progress_callback((float) done_size / size_data, progress_callback_user_data);
- }
-
- // allocate temp buffer if not using mmap
- if (!use_mmap && cur->data == NULL) {
- GGML_ASSERT(cur->backend != GGML_BACKEND_CPU);
- #ifdef GGML_USE_CPU_HBM
- cur->data = (uint8_t*)hbw_malloc(ggml_nbytes(cur));
- #else
- cur->data = (uint8_t*)malloc(ggml_nbytes(cur));
- #endif
+ if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
+ return false;
+ }
}
- load_data_for(cur);
+ const size_t offs = file_offset(ggml_get_name(cur));
- switch (cur->backend) {
- case GGML_BACKEND_CPU:
- if (use_mmap && lmlock) {
- size_lock += ggml_nbytes(cur);
- lmlock->grow_to(size_lock);
+ if (!legacy_offload || cur->backend == GGML_BACKEND_CPU) {
+ if (use_mmap && mapping) {
+ if (buf_mmap) {
+ ggml_backend_tensor_alloc(buf_mmap, cur, (uint8_t *) mapping->addr + offs);
+ if (lmlock) {
+ lmlock->grow_to(offs + ggml_nbytes(cur));
+ }
+ mmap_first = std::min(mmap_first, offs);
+ mmap_last = std::max(mmap_last, offs + ggml_nbytes(cur));
+ } else {
+ ggml_backend_tensor_set(cur, (uint8_t *) mapping->addr + offs, 0, ggml_nbytes(cur));
}
- break;
-#ifdef GGML_USE_CUBLAS
- case GGML_BACKEND_GPU:
- case GGML_BACKEND_GPU_SPLIT:
- // old code:
- //ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor);
-
- // TODO: test if this works !!
- ggml_cuda_transform_tensor(cur->data, cur);
- if (!use_mmap) {
- free(cur->data);
+ } else {
+ if (ggml_backend_buffer_is_host(cur->buffer)) {
+ file.seek(offs, SEEK_SET);
+ file.read_raw(cur->data, ggml_nbytes(cur));
+ } else {
+ read_buf.resize(ggml_nbytes(cur));
+ file.seek(offs, SEEK_SET);
+ file.read_raw(read_buf.data(), ggml_nbytes(cur));
+ ggml_backend_tensor_set(cur, read_buf.data(), 0, ggml_nbytes(cur));
}
- break;
+ }
+ } else {
+ // HACK: mark tensor as allocated
+ cur->data = (void *)(uintptr_t)1;
+ void * data;
+ if (use_mmap && mapping) {
+ data = (uint8_t *) mapping->addr + offs;
+ } else {
+ read_buf.resize(ggml_nbytes(cur));
+ file.seek(offs, SEEK_SET);
+ file.read_raw(read_buf.data(), ggml_nbytes(cur));
+ data = read_buf.data();
+ }
+
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ ggml_cuda_transform_tensor(data, cur);
#elif defined(GGML_USE_CLBLAST)
- case GGML_BACKEND_GPU:
- ggml_cl_transform_tensor(cur->data, cur);
- if (!use_mmap) {
- free(cur->data);
- }
- break;
+ GGML_ASSERT(cur->backend == GGML_BACKEND_GPU);
+ ggml_cl_transform_tensor(data, cur);
+#else
+ GGML_ASSERT(!"GPU tensor without a GPU backend");
+ GGML_UNUSED(data);
#endif
- default:
- continue;
}
- done_size += ggml_nbytes(cur);
+ size_done += ggml_nbytes(cur);
+ }
+
+ // unmap offloaded tensors and metadata
+ if (use_mmap && mapping) {
+ mapping->unmap_fragment(0, mmap_first);
+ mapping->unmap_fragment(mmap_last, mapping->size);
+ }
+
+ if (progress_callback) {
+ // Even though the model is done loading, we still honor
+ // cancellation since we need to free allocations.
+ return progress_callback(1.0f, progress_callback_user_data);
}
+ return true;
}
};
switch (ftype) {
case LLAMA_FTYPE_ALL_F32: return "all F32";
- case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16";
- case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
- case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
+ case LLAMA_FTYPE_MOSTLY_F16: return "F16";
+ case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
+ case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
- return "mostly Q4_1, some F16";
- case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0";
- case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1";
- case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0";
+ return "Q4_1, some F16";
+ case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
+ case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1";
+ case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0";
// K-quants
- case LLAMA_FTYPE_MOSTLY_Q2_K: return "mostly Q2_K";
- case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "mostly Q3_K - Small";
- case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "mostly Q3_K - Medium";
- case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "mostly Q3_K - Large";
- case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "mostly Q4_K - Small";
- case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "mostly Q4_K - Medium";
- case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "mostly Q5_K - Small";
- case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "mostly Q5_K - Medium";
- case LLAMA_FTYPE_MOSTLY_Q6_K: return "mostly Q6_K";
+ case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K";
+ case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small";
+ case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium";
+ case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large";
+ case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small";
+ case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium";
+ case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small";
+ case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium";
+ case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K";
default: return "unknown, may not work";
}
static void llm_load_hparams(
llama_model_loader & ml,
llama_model & model) {
- struct gguf_context * ctx = ml.ctx_gguf;
-
- const auto kv = LLM_KV(model.arch);
-
auto & hparams = model.hparams;
+ const gguf_context * ctx = ml.ctx_gguf;
+
+ // get metadata as string
+ for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
+ enum gguf_type type = gguf_get_kv_type(ctx, i);
+ if (type == GGUF_TYPE_ARRAY) {
+ continue;
+ }
+ const char * name = gguf_get_key(ctx, i);
+ const std::string value = gguf_kv_to_str(ctx, i);
+ model.gguf_kv.emplace(name, value);
+ }
// get general kv
- GGUF_GET_KEY(ctx, model.name, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_GENERAL_NAME));
+ ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);
// get hparams kv
- GGUF_GET_KEY(ctx, hparams.n_vocab, gguf_get_arr_n, GGUF_TYPE_ARRAY, true, kv(LLM_KV_TOKENIZER_LIST));
- GGUF_GET_KEY(ctx, hparams.n_ctx_train, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_CONTEXT_LENGTH));
- GGUF_GET_KEY(ctx, hparams.n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_EMBEDDING_LENGTH));
- GGUF_GET_KEY(ctx, hparams.n_ff, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_FEED_FORWARD_LENGTH));
- GGUF_GET_KEY(ctx, hparams.n_head, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_ATTENTION_HEAD_COUNT));
- GGUF_GET_KEY(ctx, hparams.n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true, kv(LLM_KV_BLOCK_COUNT));
+ ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab);
+ ml.get_key (LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
+ ml.get_key (LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
+ ml.get_key (LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff);
+ ml.get_key (LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head);
+ ml.get_key (LLM_KV_BLOCK_COUNT, hparams.n_layer);
+ ml.get_key (LLM_KV_EXPERT_COUNT, hparams.n_expert, false);
+ ml.get_key (LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
+
+ GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
+ GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
+ if (hparams.n_expert > 0) {
+ GGML_ASSERT(hparams.n_expert_used > 0);
+ } else {
+ GGML_ASSERT(hparams.n_expert_used == 0);
+ }
// n_head_kv is optional, default to n_head
hparams.n_head_kv = hparams.n_head;
- GGUF_GET_KEY(ctx, hparams.n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ATTENTION_HEAD_COUNT_KV));
+ ml.get_key(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv, false);
- hparams.rope_finetuned = false;
- GGUF_GET_KEY(ctx, hparams.rope_finetuned, gguf_get_val_bool, GGUF_TYPE_BOOL, false,
- kv(LLM_KV_ROPE_SCALING_FINETUNED));
+ bool rope_finetuned = false;
+ ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
+ hparams.rope_finetuned = rope_finetuned;
hparams.n_yarn_orig_ctx = hparams.n_ctx_train;
- GGUF_GET_KEY(ctx, hparams.n_yarn_orig_ctx, gguf_get_val_u32, GGUF_TYPE_UINT32, false,
- kv(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN));
+ ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_yarn_orig_ctx, false);
// rope_freq_base (optional)
hparams.rope_freq_base_train = 10000.0f;
- GGUF_GET_KEY(ctx, hparams.rope_freq_base_train, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_FREQ_BASE));
+ ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
std::string rope_scaling("linear");
- GGUF_GET_KEY(ctx, rope_scaling, gguf_get_val_str, GGUF_TYPE_STRING, false, kv(LLM_KV_ROPE_SCALING_TYPE));
+ ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_UNSPECIFIED);
// rope_freq_scale (inverse of the kv) is optional
float ropescale = 0.0f;
- GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALING_FACTOR));
- if (ropescale == 0.0f) { // try the old key name
- GGUF_GET_KEY(ctx, ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ROPE_SCALE_LINEAR));
+ if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
+ // try the old key name
+ ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
}
hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
{
hparams.n_rot = hparams.n_embd / hparams.n_head;
- GGUF_GET_KEY(ctx, hparams.n_rot, gguf_get_val_u32, GGUF_TYPE_UINT32, false, kv(LLM_KV_ROPE_DIMENSION_COUNT));
+ ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON) {
if (hparams.n_rot != hparams.n_embd / hparams.n_head) {
switch (model.arch) {
case LLM_ARCH_LLAMA:
{
- GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
switch (hparams.n_layer) {
+ case 22: model.type = e_model::MODEL_1B; break;
case 26: model.type = e_model::MODEL_3B; break;
case 32: model.type = e_model::MODEL_7B; break;
case 40: model.type = e_model::MODEL_13B; break;
} break;
case LLM_ARCH_FALCON:
{
- GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
switch (hparams.n_layer) {
case 32: model.type = e_model::MODEL_7B; break;
} break;
case LLM_ARCH_BAICHUAN:
{
- GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
switch (hparams.n_layer) {
case 32: model.type = e_model::MODEL_7B; break;
case 40: model.type = e_model::MODEL_13B; break;
} break;
case LLM_ARCH_STARCODER:
{
- GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
switch (hparams.n_layer) {
case 24: model.type = e_model::MODEL_1B; break;
case 36: model.type = e_model::MODEL_3B; break;
} break;
case LLM_ARCH_PERSIMMON:
{
- GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
switch (hparams.n_layer) {
case 36: model.type = e_model::MODEL_8B; break;
default: model.type = e_model::MODEL_UNKNOWN;
} break;
case LLM_ARCH_REFACT:
{
- GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
switch (hparams.n_layer) {
case 32: model.type = e_model::MODEL_1B; break;
default: model.type = e_model::MODEL_UNKNOWN;
} break;
case LLM_ARCH_BLOOM:
{
- GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
switch (hparams.n_layer) {
case 24: model.type = e_model::MODEL_1B; break;
{
hparams.f_clamp_kqv = 0.0f;
- GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
- GGUF_GET_KEY(ctx, hparams.f_clamp_kqv, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, kv(LLM_KV_ATTENTION_CLAMP_KQV));
- GGUF_GET_KEY(ctx, hparams.f_max_alibi_bias, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_MAX_ALIBI_BIAS));
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+ ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false);
+ ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
switch (hparams.n_layer) {
case 32: model.type = e_model::MODEL_7B; break;
default: model.type = e_model::MODEL_UNKNOWN;
}
} break;
+ case LLM_ARCH_STABLELM:
+ {
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+
+ switch (hparams.n_layer) {
+ case 32: model.type = e_model::MODEL_3B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
+ case LLM_ARCH_QWEN:
+ {
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
+
+ switch (hparams.n_layer) {
+ case 32: model.type = e_model::MODEL_7B; break;
+ case 40: model.type = e_model::MODEL_13B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
+ case LLM_ARCH_PHI2:
+ {
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+
+ switch (hparams.n_layer) {
+ case 32: model.type = e_model::MODEL_3B; break;
+ default: model.type = e_model::MODEL_UNKNOWN;
+ }
+ } break;
+
default: (void)0;
}
{
std::string tokenizer_name;
- GGUF_GET_KEY(ctx, tokenizer_name, gguf_get_val_str, GGUF_TYPE_STRING, true, kv(LLM_KV_TOKENIZER_MODEL));
+ ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_name);
if (tokenizer_name == "llama") {
vocab.type = LLAMA_VOCAB_TYPE_SPM;
};
for (const auto & it : special_token_types) {
const std::string & key = kv(std::get<0>(it));
- int32_t & id = std::get<1>(it), old_id = id;
+ int32_t & id = std::get<1>(it);
- GGUF_GET_KEY(ctx, id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, key);
- // Must be >= -1 and < vocab size. Since the key is unsigned, -1
- // can only come from the default value, so there's no point in
- // validating that.
- if (size_t(id + 1) > vocab.id_to_token.size()) {
- LLAMA_LOG_WARN("%s: bad special token: '%s' = %d, using default id %d\n",
- __func__, key.c_str(), id, old_id);
- id = old_id;
+ uint32_t new_id;
+ if (!ml.get_key(std::get<0>(it), new_id, false)) {
+ continue;
+ }
+ if (new_id >= vocab.id_to_token.size()) {
+ LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n",
+ __func__, key.c_str(), new_id, id);
+ } else {
+ id = new_id;
+ }
+
+ }
+
+ // Handle add_bos_token and add_eos_token
+ {
+ bool temp = true;
+
+ if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
+ vocab.special_add_bos = int(temp);
+ }
+ if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
+ vocab.special_add_eos = int(temp);
}
}
}
// The assumption is, since special tokens aren't meant to be exposed to end user, they are designed
// to be unmatchable by the tokenizer, therefore tokens from the vocab, which are unmatchable by the tokenizer
// are special tokens.
- // From testing, this appears to corelate 1:1 with special tokens.
+ // From testing, this appears to correlate 1:1 with special tokens.
//
// Counting special tokens and verifying in only one direction
LLAMA_LOG_INFO("%s: f_clamp_kqv = %.1e\n", __func__, hparams.f_clamp_kqv);
LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n", __func__, hparams.f_max_alibi_bias);
LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff);
+ LLAMA_LOG_INFO("%s: n_expert = %u\n", __func__, hparams.n_expert);
+ LLAMA_LOG_INFO("%s: n_expert_used = %u\n", __func__, hparams.n_expert_used);
LLAMA_LOG_INFO("%s: rope scaling = %s\n", __func__, rope_scaling_type.c_str());
LLAMA_LOG_INFO("%s: freq_base_train = %.1f\n", __func__, hparams.rope_freq_base_train);
LLAMA_LOG_INFO("%s: freq_scale_train = %g\n", __func__, hparams.rope_freq_scale_train);
LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
- if (ml.n_bytes < GB) {
- LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
+ if (ml.n_bytes < GiB) {
+ LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
} else {
LLAMA_LOG_INFO("%s: model size = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
}
// general kv
- LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
+ LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, model.name.c_str());
// special tokens
- if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
- if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
- if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
- if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
- if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
- if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
+ if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); }
+ if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); }
+ if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); }
+ if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); }
+ if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); }
+ if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
}
-static void llm_load_tensors(
+// Returns false if cancelled by progress_callback
+static bool llm_load_tensors(
llama_model_loader & ml,
llama_model & model,
int n_gpu_layers,
model.n_gpu_layers = n_gpu_layers;
- size_t ctx_size;
- size_t mmapped_size;
+ size_t ctx_size = ggml_tensor_overhead() * ml.n_tensors;
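+    // note: only per-tensor metadata overhead is counted here - the context below is created with
+    //       no_alloc = true, so the actual weight data is placed in backend buffers later on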
- ml.calc_sizes(ctx_size, mmapped_size);
-
- LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0);
+ LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MiB\n", __func__, ctx_size/1024.0/1024.0);
// create the ggml context
{
- model.buf.resize(ctx_size);
- if (use_mlock) {
- model.mlock_buf.init (model.buf.data);
- model.mlock_buf.grow_to(model.buf.size);
- }
-
struct ggml_init_params params = {
- /*.mem_size =*/ model.buf.size,
- /*.mem_buffer =*/ model.buf.data,
- /*.no_alloc =*/ ml.use_mmap,
+ /*.mem_size =*/ ctx_size,
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ true,
};
model.ctx = ggml_init(params);
}
(void) main_gpu;
-#ifdef GGML_USE_CUBLAS
- LLAMA_LOG_INFO("%s: using " GGML_CUDA_NAME " for GPU acceleration\n", __func__);
- ggml_cuda_set_main_device(main_gpu);
-#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
-#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT
+
+ enum ggml_backend_type llama_backend_offload = GGML_BACKEND_CPU;
+ enum ggml_backend_type llama_backend_offload_split = GGML_BACKEND_CPU;
+
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ if (ggml_cublas_loaded()) {
+ LLAMA_LOG_INFO("%s: using " GGML_CUDA_NAME " for GPU acceleration\n", __func__);
+ ggml_cuda_set_main_device(main_gpu);
+
+ llama_backend_offload = GGML_BACKEND_GPU;
+ llama_backend_offload_split = GGML_BACKEND_GPU_SPLIT;
+ }
#elif defined(GGML_USE_CLBLAST)
- LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
-#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU
-#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU
-#else
-#define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_CPU
-#define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_CPU
+ LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__);
+ llama_backend_offload = GGML_BACKEND_GPU;
+ llama_backend_offload_split = GGML_BACKEND_GPU;
#endif
- // prepare memory for the weights
- size_t vram_weights = 0;
+ // create tensors for the weights
{
const int64_t n_embd = hparams.n_embd;
const int64_t n_embd_gqa = hparams.n_embd_gqa();
ggml_backend_type backend_output;
if (n_gpu_layers > int(n_layer)) {
- // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
- // on Windows however this is detrimental unless everything is on the GPU
-#ifndef _WIN32
- backend_norm = LLAMA_BACKEND_OFFLOAD;
-#else
- backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
-#endif // _WIN32
-
- backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ backend_norm = llama_backend_offload;
+ backend_output = llama_backend_offload_split;
} else {
backend_norm = GGML_BACKEND_CPU;
backend_output = GGML_BACKEND_CPU;
model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
-
- if (backend_norm == GGML_BACKEND_GPU) {
- vram_weights += ggml_nbytes(model.output_norm);
- }
- if (backend_output == GGML_BACKEND_GPU_SPLIT) {
- vram_weights += ggml_nbytes(model.output);
- }
}
const uint32_t n_ff = hparams.n_ff;
model.layers.resize(n_layer);
for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
- const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
auto & layer = model.layers[i];
layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+ // optional bias tensors
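+                        // (the trailing 'false' marks them as optional - if missing from the GGUF they are left as nullptr)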
+ layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend, false);
+ layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend, false);
+ layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend, false);
+ layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend, false);
+
layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
- layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
- layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
- layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_gate_inp = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd}, backend, false);
- if (backend == GGML_BACKEND_GPU) {
- vram_weights +=
- ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
- ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
- ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
+ if (layer.ffn_gate_inp == nullptr) {
+ GGML_ASSERT(hparams.n_expert == 0);
+ GGML_ASSERT(hparams.n_expert_used == 0);
+
+ layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ } else {
+ GGML_ASSERT(hparams.n_expert > 0);
+ GGML_ASSERT(hparams.n_expert_used > 0);
+
+ // MoE branch
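+                            // each expert has its own gate/down/up projection; the expert index is encoded in the tensor name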
+ for (uint32_t x = 0; x < hparams.n_expert; ++x) {
+ layer.ffn_gate_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE_EXP, "weight", i, x), {n_embd, n_ff}, backend_split);
+ layer.ffn_down_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN_EXP, "weight", i, x), { n_ff, n_embd}, backend_split);
+ layer.ffn_up_exp[x] = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP_EXP, "weight", i, x), {n_embd, n_ff}, backend_split);
+ }
}
}
} break;
ggml_backend_type backend_output;
if (n_gpu_layers > int(n_layer)) {
- // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
- // on Windows however this is detrimental unless everything is on the GPU
-#ifndef _WIN32
- backend_norm = LLAMA_BACKEND_OFFLOAD;
-#else
- backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
-#endif // _WIN32
-
- backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ backend_norm = llama_backend_offload;
+ backend_output = llama_backend_offload_split;
} else {
backend_norm = GGML_BACKEND_CPU;
backend_output = GGML_BACKEND_CPU;
model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
-
- if (backend_norm == GGML_BACKEND_GPU) {
- vram_weights += ggml_nbytes(model.output_norm);
- }
- if (backend_output == GGML_BACKEND_GPU_SPLIT) {
- vram_weights += ggml_nbytes(model.output);
- }
}
const uint32_t n_ff = hparams.n_ff;
model.layers.resize(n_layer);
for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
- const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
auto & layer = model.layers[i];
layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
-
- if (backend == GGML_BACKEND_GPU) {
- vram_weights +=
- ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.wq) + ggml_nbytes(layer.wk) +
- ggml_nbytes(layer.wv) + ggml_nbytes(layer.wo) + ggml_nbytes(layer.ffn_norm) +
- ggml_nbytes(layer.ffn_gate) + ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
- }
}
} break;
case LLM_ARCH_FALCON:
{
- // TODO: CPU-only for now
-
model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
// output
ggml_backend_type backend_output;
if (n_gpu_layers > int(n_layer)) {
- // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
- // on Windows however this is detrimental unless everything is on the GPU
-#ifndef _WIN32
- backend_norm = LLAMA_BACKEND_OFFLOAD;
-#else
- backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
-#endif // _WIN32
-
- backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ backend_norm = llama_backend_offload;
+ backend_output = llama_backend_offload_split;
} else {
backend_norm = GGML_BACKEND_CPU;
backend_output = GGML_BACKEND_CPU;
model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
-
- if (backend_norm == GGML_BACKEND_GPU) {
- vram_weights += ggml_nbytes(model.output_norm);
- vram_weights += ggml_nbytes(model.output_norm_b);
- }
- if (backend_output == GGML_BACKEND_GPU_SPLIT) {
- vram_weights += ggml_nbytes(model.output);
- }
}
const uint32_t n_ff = hparams.n_ff;
model.layers.resize(n_layer);
for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
- const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
auto & layer = model.layers[i];
if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i).c_str()) >= 0) {
layer.attn_norm_2 = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, backend);
layer.attn_norm_2_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, backend);
-
- if (backend == GGML_BACKEND_GPU) {
- vram_weights += ggml_nbytes(layer.attn_norm_2);
- vram_weights += ggml_nbytes(layer.attn_norm_2_b);
- }
}
layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
-
- if (backend == GGML_BACKEND_GPU) {
- vram_weights +=
- ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) +
- ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.wo) +
- ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_up);
- }
}
} break;
case LLM_ARCH_STARCODER:
ggml_backend_type backend_output;
if (n_gpu_layers > int(n_layer)) {
- // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
- // on Windows however this is detrimental unless everything is on the GPU
-#ifndef _WIN32
- backend_norm = LLAMA_BACKEND_OFFLOAD;
-#else
- backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
-#endif // _WIN32
-
- backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ backend_norm = llama_backend_offload;
+ backend_output = llama_backend_offload_split;
} else {
backend_norm = GGML_BACKEND_CPU;
backend_output = GGML_BACKEND_CPU;
model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
-
- if (backend_norm == GGML_BACKEND_GPU) {
- vram_weights += ggml_nbytes(model.output_norm);
- vram_weights += ggml_nbytes(model.output_norm_b);
- }
- if (backend_output == GGML_BACKEND_GPU_SPLIT) {
- vram_weights += ggml_nbytes(model.output);
- }
}
const uint32_t n_ff = hparams.n_ff;
model.layers.resize(n_layer);
for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
- const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
auto & layer = model.layers[i];
layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
-
- if (backend == GGML_BACKEND_GPU) {
- vram_weights +=
- ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) +
- ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) +
- ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) +
- ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_norm_b) +
- ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b) +
- ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b);
- }
}
} break;
case LLM_ARCH_PERSIMMON:
ggml_backend_type backend_output;
if (n_gpu_layers > int(n_layer)) {
- // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
- // on Windows however this is detrimental unless everything is on the GPU
-#ifndef _WIN32
- backend_norm = LLAMA_BACKEND_OFFLOAD;
-#else
- backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
-#endif // _WIN32
-
- backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ backend_norm = llama_backend_offload;
+ backend_output = llama_backend_offload_split;
} else {
backend_norm = GGML_BACKEND_CPU;
backend_output = GGML_BACKEND_CPU;
model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
-
- if (backend_norm == GGML_BACKEND_GPU) {
- vram_weights += ggml_nbytes(model.output_norm);
- vram_weights += ggml_nbytes(model.output_norm_b);
- }
- if (backend_output == GGML_BACKEND_GPU_SPLIT) {
- vram_weights += ggml_nbytes(model.output);
- }
}
const uint32_t n_ff = hparams.n_ff;
const int i_gpu_start = n_layer - n_gpu_layers;
model.layers.resize(n_layer);
for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
- const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT;
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload;
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split;
auto & layer = model.layers[i];
layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
} break;
case LLM_ARCH_BLOOM:
{
- // TODO: CPU-only for now
-
model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
model.tok_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, GGML_BACKEND_CPU);
model.tok_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, GGML_BACKEND_CPU);
ggml_backend_type backend_output;
if (n_gpu_layers > int(n_layer)) {
- // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
- // on Windows however this is detrimental unless everything is on the GPU
-#ifndef _WIN32
- backend_norm = LLAMA_BACKEND_OFFLOAD;
-#else
- backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
-#endif // _WIN32
-
- backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ backend_norm = llama_backend_offload;
+ backend_output = llama_backend_offload_split;
} else {
backend_norm = GGML_BACKEND_CPU;
backend_output = GGML_BACKEND_CPU;
model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
-
- if (backend_norm == GGML_BACKEND_GPU) {
- vram_weights += ggml_nbytes(model.output_norm);
- vram_weights += ggml_nbytes(model.output_norm_b);
- }
- if (backend_output == GGML_BACKEND_GPU_SPLIT) {
- vram_weights += ggml_nbytes(model.output);
- }
}
const uint32_t n_ff = hparams.n_ff;
model.layers.resize(n_layer);
for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
- const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
auto & layer = model.layers[i];
layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
-
- if (backend == GGML_BACKEND_GPU) {
- vram_weights +=
- ggml_nbytes(layer.attn_norm) + ggml_nbytes(layer.attn_norm_b) +
- ggml_nbytes(layer.wqkv) + ggml_nbytes(layer.bqkv) +
- ggml_nbytes(layer.wo) + ggml_nbytes(layer.bo) +
- ggml_nbytes(layer.ffn_norm) + ggml_nbytes(layer.ffn_norm_b) +
- ggml_nbytes(layer.ffn_up) + ggml_nbytes(layer.ffn_up_b) +
- ggml_nbytes(layer.ffn_down) + ggml_nbytes(layer.ffn_down_b);
- }
}
} break;
case LLM_ARCH_MPT:
ggml_backend_type backend_output;
if (n_gpu_layers > int(n_layer)) {
- // norm is not performance relevant on its own but keeping it in VRAM reduces data copying
- // on Windows however this is detrimental unless everything is on the GPU
-#ifndef _WIN32
- backend_norm = LLAMA_BACKEND_OFFLOAD;
-#else
- backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
-#endif // _WIN32
-
- backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
+ backend_norm = llama_backend_offload;
+ backend_output = llama_backend_offload_split;
} else {
backend_norm = GGML_BACKEND_CPU;
backend_output = GGML_BACKEND_CPU;
model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
-
- if (backend_norm == GGML_BACKEND_GPU) {
- vram_weights += ggml_nbytes(model.output_norm);
- }
- if (backend_output == GGML_BACKEND_GPU_SPLIT) {
- vram_weights += ggml_nbytes(model.output);
- }
}
const uint32_t n_ff = hparams.n_ff;
model.layers.resize(n_layer);
for (uint32_t i = 0; i < n_layer; ++i) {
- const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
- const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
auto & layer = model.layers[i];
layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
-
- if (backend == GGML_BACKEND_GPU) {
- vram_weights +=
- ggml_nbytes(layer.attn_norm) +
- ggml_nbytes(layer.wqkv) +
- ggml_nbytes(layer.wo) +
- ggml_nbytes(layer.ffn_norm) +
- ggml_nbytes(layer.ffn_down) +
- ggml_nbytes(layer.ffn_up);
- }
}
} break;
- default:
- throw std::runtime_error("unknown architecture");
- }
- }
+ case LLM_ARCH_STABLELM:
+ {
+ model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
- ml.done_getting_tensors();
+ // output
+ {
+ ggml_backend_type backend_norm;
+ ggml_backend_type backend_output;
- // print memory requirements
- {
- // this is the total memory required to run the inference
- size_t mem_required =
- ctx_size +
- mmapped_size - vram_weights; // weights in VRAM not in memory
+ if (n_gpu_layers > int(n_layer)) {
+ backend_norm = llama_backend_offload;
+ backend_output = llama_backend_offload_split;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
- LLAMA_LOG_INFO("%s: mem required = %7.2f MB\n", __func__, mem_required / 1024.0 / 1024.0);
+ model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+ }
-#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
- const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
+ const uint32_t n_ff = hparams.n_ff;
- LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
- if (n_gpu_layers > (int) hparams.n_layer) {
- LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
- }
+ const int i_gpu_start = n_layer - n_gpu_layers;
-#ifdef GGML_USE_CUBLAS
- const int max_backend_supported_layers = hparams.n_layer + 3;
- const int max_offloadable_layers = hparams.n_layer + 3;
-#elif GGML_USE_CLBLAST
- const int max_backend_supported_layers = hparams.n_layer + 1;
- const int max_offloadable_layers = hparams.n_layer + 1;
-#endif // GGML_USE_CUBLAS
+ model.layers.resize(n_layer);
- LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
- LLAMA_LOG_INFO("%s: VRAM used: %.2f MB\n", __func__, vram_weights / 1024.0 / 1024.0);
-#else
- (void) n_gpu_layers;
-#endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
- }
+ for (uint32_t i = 0; i < n_layer; ++i) {
+                        // example: blk.0.attn_output.weight f16 [ 2560, 2560, 1, 1 ]
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
- // populate `tensors_by_name`
- for (int i = 0; i < ml.n_tensors; ++i) {
- struct ggml_tensor * cur = ggml_get_tensor(ctx, ml.get_tensor_name(i));
- model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
- }
+ auto & layer = model.layers[i];
- (void) tensor_split;
-#ifdef GGML_USE_CUBLAS
- {
- ggml_cuda_set_tensor_split(tensor_split);
- }
-#endif
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+ layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
- ml.load_all_data(ctx, progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL);
+ layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);
+ layer.wk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, backend_split);
+ layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
- if (progress_callback) {
- progress_callback(1.0f, progress_callback_user_data);
- }
+ layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
+ layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
- model.mapping = std::move(ml.mapping);
+ layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ }
+ } break;
+ case LLM_ARCH_QWEN:
+ {
+ model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+ {
+ ggml_backend_type backend_norm;
+ ggml_backend_type backend_output;
- // loading time will be recalculate after the first eval, so
- // we take page faults deferred by mmap() into consideration
- model.t_load_us = ggml_time_us() - model.t_start_us;
-}
+ if (n_gpu_layers > int(n_layer)) {
+ backend_norm = llama_backend_offload;
+ backend_output = llama_backend_offload_split;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
-static bool llama_model_load(const std::string & fname, llama_model & model, const llama_model_params & params) {
- try {
- llama_model_loader ml(fname, params.use_mmap);
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+ }
- model.hparams.vocab_only = params.vocab_only;
+ const uint32_t n_ff = hparams.n_ff / 2;
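+                    // assumption: Qwen GGUFs store feed_forward_length as twice the per-projection width, hence the halving above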
- llm_load_arch (ml, model);
- llm_load_hparams(ml, model);
- llm_load_vocab (ml, model);
+ const int i_gpu_start = n_layer - n_gpu_layers;
- llm_load_print_meta(ml, model);
+ model.layers.resize(n_layer);
+
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
+
+ auto & layer = model.layers[i];
+
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+
+ layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd * 3}, backend_split);
+ layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd * 3}, backend);
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+
+ layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
+
+ layer.ffn_gate = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, backend_split);
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ }
+ } break;
+ case LLM_ARCH_PHI2:
+ {
+ model.tok_embd = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
+
+ // output
+ {
+ ggml_backend_type backend_norm;
+ ggml_backend_type backend_output;
+
+ if (n_gpu_layers > int(n_layer)) {
+ backend_norm = llama_backend_offload;
+ backend_output = llama_backend_offload;
+ } else {
+ backend_norm = GGML_BACKEND_CPU;
+ backend_output = GGML_BACKEND_CPU;
+ }
+
+ model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
+ model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
+ model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
+ model.output_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}, backend_output);
+ }
+
+ const uint32_t n_ff = hparams.n_ff;
+
+ const int i_gpu_start = n_layer - n_gpu_layers;
+
+ model.layers.resize(n_layer);
+
+ for (uint32_t i = 0; i < n_layer; ++i) {
+ const ggml_backend_type backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload; // NOLINT
+ const ggml_backend_type backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : llama_backend_offload_split; // NOLINT
+
+ auto & layer = model.layers[i];
+
+ layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
+ layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
+
+ layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
+ layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend);
+
+ layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
+ layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
+
+ layer.ffn_down = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
+ layer.ffn_down_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend);
+
+ layer.ffn_up = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
+ layer.ffn_up_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend);
+ }
+ } break;
+ default:
+ throw std::runtime_error("unknown architecture");
+ }
+ }
+
+ ml.done_getting_tensors();
+
+ ml.init_mapping();
+
+ // allocate tensors
+ size_t vram_weights = 0;
+ size_t buf_size = 0;
+
+ ggml_backend_buffer_type_t buft = llama_default_buffer_type(n_gpu_layers);
+
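+    // CPU tensors are packed into a single host buffer sized below; tensors offloaded to the GPU are only
+    // counted towards the VRAM total, since the CUDA/OpenCL paths allocate and manage them separately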
+ for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
+ // GGML_BACKEND_GPU tensors are for CUDA and OpenCL only, which are handled separately without ggml-backend
+ if (t->backend == GGML_BACKEND_CPU) {
+ buf_size += GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), ggml_backend_buft_get_alignment(buft));
+ } else {
+ vram_weights += ggml_nbytes(t);
+ }
+ }
+
+ // create backend buffer
+ ggml_backend_buffer_t buf_mmap = nullptr;
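+    // buf_mmap is set whenever the weights can be read directly from the memory-mapped file,
+    // in which case the tensor data does not need to be copied into the buffer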
+
+#ifdef GGML_USE_METAL
+ if (n_gpu_layers > 0) {
+ if (ml.use_mmap) {
+ const size_t max_size = ggml_get_max_tensor_size(ctx);
+ model.buf = ggml_backend_metal_buffer_from_ptr(ml.mapping->addr, ml.mapping->size, max_size);
+ buf_mmap = model.buf;
+ } else {
+ model.buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_metal_buffer_type());
+ }
+ }
+#elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ // for testing only
+ if (n_gpu_layers > 0) {
+ model.buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cuda_buffer_type(0));
+ }
+#endif
+
+ if (model.buf == nullptr) {
+ // CPU backend, and indirectly CUDA and OpenCL
+ if (ml.use_mmap) {
+ model.buf = ggml_backend_cpu_buffer_from_ptr(ml.mapping->addr, ml.mapping->size);
+ buf_mmap = model.buf;
+ } else {
+ // allocate only CPU tensors
+ model.buf = ggml_backend_buft_alloc_buffer(buft, buf_size);
+ ggml_tallocr_t alloc = ggml_tallocr_new_from_buffer(model.buf);
+ for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) {
+ if (t->backend == GGML_BACKEND_CPU) {
+ ggml_tallocr_alloc(alloc, t);
+ }
+ }
+ ggml_tallocr_free(alloc);
+ }
+ }
+
+ if (use_mlock && ggml_backend_buffer_is_host(model.buf)) {
+ model.mlock_buf.init (ggml_backend_buffer_get_base(model.buf));
+ model.mlock_buf.grow_to(ggml_backend_buffer_get_size(model.buf));
+ }
+
+ // print memory requirements
+ {
+ size_t sys_mem_required = ctx_size + buf_size;
+
+ if (sys_mem_required > 0) {
+ LLAMA_LOG_INFO("%s: system memory used = %7.2f MiB\n", __func__, sys_mem_required / 1024.0 / 1024.0);
+ }
+ if (vram_weights > 0) {
+ LLAMA_LOG_INFO("%s: VRAM used = %7.2f MiB\n", __func__, vram_weights / 1024.0 / 1024.0);
+ }
+
+#if (defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)) || defined(GGML_USE_CLBLAST)
+ const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
+
+ LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
+ if (n_gpu_layers > (int) hparams.n_layer) {
+ LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__);
+ }
+
+ const int max_backend_supported_layers = hparams.n_layer + 1;
+ const int max_offloadable_layers = hparams.n_layer + 1;
+
+ LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
+#endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
+ }
+
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ ggml_cuda_set_tensor_split(tensor_split);
+#else
+ GGML_UNUSED(tensor_split);
+#endif // GGML_USE_CUBLAS
+
+ // populate tensors_by_name
+ for (int i = 0; i < ml.n_tensors; ++i) {
+ struct ggml_tensor * cur = ggml_get_tensor(ctx, ml.get_tensor_name(i));
+ model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
+ }
+
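+    // stream the tensor data into the buffers created above; load_all_data returns false when the
+    // progress callback requests cancellation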
+ if (!ml.load_all_data(ctx, progress_callback, progress_callback_user_data, buf_mmap, use_mlock ? &model.mlock_mmap : NULL)) {
+ return false;
+ }
+
+ model.mapping = std::move(ml.mapping);
+
+    // loading time will be recalculated after the first eval, so
+ // we take page faults deferred by mmap() into consideration
+ model.t_load_us = ggml_time_us() - model.t_start_us;
+ return true;
+}
+
+// Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
+static int llama_model_load(const std::string & fname, llama_model & model, const llama_model_params & params) {
+ try {
+ llama_model_loader ml(fname, params.use_mmap, params.kv_overrides);
+
+ model.hparams.vocab_only = params.vocab_only;
+
+ llm_load_arch (ml, model);
+ llm_load_hparams(ml, model);
+ llm_load_vocab (ml, model);
+
+ llm_load_print_meta(ml, model);
if (model.hparams.n_vocab != model.vocab.id_to_token.size()) {
throw std::runtime_error("vocab size mismatch");
if (params.vocab_only) {
LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
- return true;
+ return 0;
}
- llm_load_tensors(
+ if (!llm_load_tensors(
ml, model, params.n_gpu_layers, params.main_gpu, params.tensor_split, params.use_mlock,
params.progress_callback, params.progress_callback_user_data
- );
+ )) {
+ return -2;
+ }
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("error loading model: %s\n", err.what());
- return false;
+ return -1;
}
- return true;
+ return 0;
}
//
struct ggml_cgraph * graph,
llm_rope_type type,
int64_t n_ctx,
- int64_t n_rot,
+ int n_rot,
float freq_base,
float freq_scale,
const llm_build_cb & cb) {
struct ggml_tensor * tmp =
// we rotate only the first n_rot dimensions
ggml_rope_custom_inplace(ctx,
- ggml_view_3d(ctx, kv.k,
- n_rot, n_head_kv, n_ctx,
- ggml_element_size(kv.k)*n_embd_head,
- ggml_element_size(kv.k)*n_embd_gqa,
- ggml_element_size(kv.k)*n_embd_gqa*n_ctx*il),
+ ggml_view_3d(ctx, kv.k_l[il],
+ n_embd_head, n_head_kv, n_ctx,
+ ggml_row_size(kv.k_l[il]->type, n_embd_head),
+ ggml_row_size(kv.k_l[il]->type, n_embd_gqa),
+ 0),
K_shift, n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
ext_factor, attn_factor, beta_fast, beta_slow);
cb(tmp, "K_shifted", il);
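+        // note: the KV cache is now split per layer (kv.k_l[il]), so the view above starts at offset 0
+        //       instead of skipping the previous il layers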
//struct ggml_tensor * v_cur_t = ggml_transpose(ctx, v_cur); // TODO: reshape above is likely not needed
cb(v_cur_t, "v_cur_t", il);
- struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k, n_tokens*n_embd_gqa,
- (ggml_element_size(kv.k)*n_embd_gqa)*(il*n_ctx + kv_head));
+ struct ggml_tensor * k_cache_view = ggml_view_1d(ctx, kv.k_l[il], n_tokens*n_embd_gqa,
+ (ggml_row_size(kv.k_l[il]->type, n_embd_gqa))*kv_head);
cb(k_cache_view, "k_cache_view", il);
- struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv.v, n_tokens, n_embd_gqa,
- ( n_ctx)*ggml_element_size(kv.v),
- (il*n_ctx)*ggml_element_size(kv.v)*n_embd_gqa + kv_head*ggml_element_size(kv.v));
+ struct ggml_tensor * v_cache_view = ggml_view_2d(ctx, kv.v_l[il], n_tokens, n_embd_gqa,
+ ( n_ctx)*ggml_element_size(kv.v_l[il]),
+ (kv_head)*ggml_element_size(kv.v_l[il]));
cb(v_cache_view, "v_cache_view", il);
// important: storing RoPE-ed version of K in the KV cache!
// if max_alibi_bias > 0 then apply ALiBi
static struct ggml_tensor * llm_build_kqv(
struct ggml_context * ctx,
+ const llama_model & model,
const llama_hparams & hparams,
const llama_kv_cache & kv,
struct ggml_tensor * wo,
struct ggml_tensor * wo_b,
struct ggml_tensor * q_cur,
- struct ggml_tensor * kq_scale,
struct ggml_tensor * kq_mask,
int64_t n_ctx,
int32_t n_tokens,
int32_t n_kv,
float max_alibi_bias,
+ float kq_scale,
const llm_build_cb & cb,
int il) {
const int64_t n_embd = hparams.n_embd;
cb(q, "q", il);
struct ggml_tensor * k =
- ggml_view_3d(ctx, kv.k,
+ ggml_view_3d(ctx, kv.k_l[il],
n_embd_head, n_kv, n_head_kv,
- ggml_element_size(kv.k)*n_embd_gqa,
- ggml_element_size(kv.k)*n_embd_head,
- ggml_element_size(kv.k)*n_embd_gqa*n_ctx*il);
+ ggml_row_size(kv.k_l[il]->type, n_embd_gqa),
+ ggml_row_size(kv.k_l[il]->type, n_embd_head),
+ 0);
cb(k, "k", il);
struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
cb(kq, "kq", il);
- kq = ggml_scale(ctx, kq, kq_scale);
- cb(kq, "kq_scaled", il);
+ if (model.arch == LLM_ARCH_PHI2) {
+ // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
+ // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
+ ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
+ }
if (max_alibi_bias > 0.0f) {
- // TODO: n_head or n_head_kv
- // TODO: K-shift is likely not working
- // TODO: change to ggml_add
- kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, max_alibi_bias);
- cb(kq, "kq_scaled_alibi", il);
- }
+ // temporary branch until we figure out how to handle ggml_alibi through ggml_add
+ kq = ggml_scale(ctx, kq, kq_scale);
+ cb(kq, "kq_scaled", il);
+
+ if (max_alibi_bias > 0.0f) {
+ // TODO: n_head or n_head_kv
+ // TODO: K-shift is likely not working
+ // TODO: change to ggml_add
+ kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, max_alibi_bias);
+ cb(kq, "kq_scaled_alibi", il);
+ }
- kq = ggml_add(ctx, kq, kq_mask);
- cb(kq, "kq_masked", il);
+ kq = ggml_add(ctx, kq, kq_mask);
+ cb(kq, "kq_masked", il);
- kq = ggml_soft_max(ctx, kq);
- cb(kq, "kq_soft_max", il);
+ kq = ggml_soft_max(ctx, kq);
+ cb(kq, "kq_soft_max", il);
+ } else {
+ kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale);
+ cb(kq, "kq_soft_max_ext", il);
+ }
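+    // (ggml_soft_max_ext fuses the kq_scale scaling and the kq_mask addition into the softmax,
+    //  replacing the explicit scale/add/soft_max sequence kept in the ALiBi branch above)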
// split cached v into n_head heads
struct ggml_tensor * v =
- ggml_view_3d(ctx, kv.v,
+ ggml_view_3d(ctx, kv.v_l[il],
n_kv, n_embd_head, n_head_kv,
- ggml_element_size(kv.v)*n_ctx,
- ggml_element_size(kv.v)*n_ctx*n_embd_head,
- ggml_element_size(kv.v)*n_ctx*n_embd_gqa*il);
+ ggml_element_size(kv.v_l[il])*n_ctx,
+ ggml_element_size(kv.v_l[il])*n_ctx*n_embd_head,
+ 0);
cb(v, "v", il);
struct ggml_tensor * kqv = ggml_mul_mat(ctx, v, kq);
const int64_t n_head_kv;
const int64_t n_embd_head;
const int64_t n_embd_gqa;
+ const int64_t n_expert;
+ const int64_t n_expert_used;
const float freq_base;
const float freq_scale;
const llm_build_cb & cb;
- llama_buffer & buf_compute;
+ std::vector<uint8_t> & buf_compute_meta;
struct ggml_context * ctx0 = nullptr;
const llama_batch & batch,
const llm_build_cb & cb,
bool worst_case) :
- model (lctx.model),
- hparams (model.hparams),
- cparams (lctx.cparams),
- batch (batch),
- kv_self (lctx.kv_self),
- n_embd (hparams.n_embd),
- n_layer (hparams.n_layer),
- n_ctx (cparams.n_ctx),
- n_head (hparams.n_head),
- n_head_kv (hparams.n_head_kv),
- n_embd_head (hparams.n_embd_head()),
- n_embd_gqa (hparams.n_embd_gqa()),
- freq_base (cparams.rope_freq_base),
- freq_scale (cparams.rope_freq_scale),
- ext_factor (cparams.yarn_ext_factor),
- attn_factor (cparams.yarn_attn_factor),
- beta_fast (cparams.yarn_beta_fast),
- beta_slow (cparams.yarn_beta_slow),
- norm_eps (hparams.f_norm_eps),
- norm_rms_eps (hparams.f_norm_rms_eps),
- n_tokens (batch.n_tokens),
- n_kv (worst_case ? n_ctx : kv_self.n),
- kv_head (worst_case ? n_ctx - n_tokens : kv_self.head),
- n_orig_ctx (cparams.n_yarn_orig_ctx),
- do_rope_shift (worst_case || kv_self.has_shift),
- cb (cb),
- buf_compute (lctx.buf_compute) {
+ model (lctx.model),
+ hparams (model.hparams),
+ cparams (lctx.cparams),
+ batch (batch),
+ kv_self (lctx.kv_self),
+ n_embd (hparams.n_embd),
+ n_layer (hparams.n_layer),
+ n_ctx (cparams.n_ctx),
+ n_head (hparams.n_head),
+ n_head_kv (hparams.n_head_kv),
+ n_embd_head (hparams.n_embd_head()),
+ n_embd_gqa (hparams.n_embd_gqa()),
+ n_expert (hparams.n_expert),
+ n_expert_used (hparams.n_expert_used),
+ freq_base (cparams.rope_freq_base),
+ freq_scale (cparams.rope_freq_scale),
+ ext_factor (cparams.yarn_ext_factor),
+ attn_factor (cparams.yarn_attn_factor),
+ beta_fast (cparams.yarn_beta_fast),
+ beta_slow (cparams.yarn_beta_slow),
+ norm_eps (hparams.f_norm_eps),
+ norm_rms_eps (hparams.f_norm_rms_eps),
+ n_tokens (batch.n_tokens),
+ n_kv (worst_case ? n_ctx : kv_self.n),
+ kv_head (worst_case ? n_ctx - n_tokens : kv_self.head),
+ n_orig_ctx (cparams.n_yarn_orig_ctx),
+ do_rope_shift (worst_case || kv_self.has_shift),
+ cb (cb),
+ buf_compute_meta (lctx.buf_compute_meta) {
GGML_ASSERT(!!kv_self.ctx);
// all initializations should be done in init()
void init() {
struct ggml_init_params params = {
- /*.mem_size =*/ buf_compute.size,
- /*.mem_buffer =*/ buf_compute.data,
+ /*.mem_size =*/ buf_compute_meta.size(),
+ /*.mem_buffer =*/ buf_compute_meta.data(),
/*.no_alloc =*/ true,
};
}
struct ggml_cgraph * build_llama() {
- struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
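+        // note: LLAMA_MAX_NODES reserves more node headroom than the default ggml graph size,
+        //       which the larger graphs (e.g. the MoE branch) can exceed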
GGML_ASSERT(n_embd_head == hparams.n_rot);
struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
cb(inp_pos, "inp_pos", -1);
- // KQ_scale
- struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- cb(KQ_scale, "KQ_scale", -1);
-
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
cb(KQ_mask, "KQ_mask", -1);
// compute Q and K and RoPE them
struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
cb(Qcur, "Qcur", il);
+ if (model.layers[il].bq) {
+ Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+ cb(Qcur, "Qcur", il);
+ }
struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
cb(Kcur, "Kcur", il);
+ if (model.layers[il].bk) {
+ Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+ cb(Kcur, "Kcur", il);
+ }
struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
cb(Vcur, "Vcur", il);
+ if (model.layers[il].bv) {
+ Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+ cb(Vcur, "Vcur", il);
+ }
Qcur = ggml_rope_custom(
ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
- model.layers[il].wo, NULL,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
+ model.layers[il].wo, model.layers[il].bo,
+ Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
cb(ffn_inp, "ffn_inp", il);
// feed-forward network
- {
+ if (model.layers[il].ffn_gate_inp == nullptr) {
cur = llm_build_norm(ctx0, ffn_inp, hparams,
model.layers[il].ffn_norm, NULL,
LLM_NORM_RMS, cb, il);
model.layers[il].ffn_down, NULL,
LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
cb(cur, "ffn_out", il);
+ } else {
+ // MoE branch
+ cur = llm_build_norm(ctx0, ffn_inp, hparams,
+ model.layers[il].ffn_norm, NULL,
+ LLM_NORM_RMS, cb, il);
+ cb(cur, "ffn_norm", il);
+
+ ggml_tensor * logits = ggml_mul_mat(ctx0, model.layers[il].ffn_gate_inp, cur); // [n_tokens, num_experts]
+ cb(logits, "ffn_moe_logits", il);
+
+ ggml_tensor * probs = ggml_soft_max(ctx0, logits); // [n_tokens, num_experts]
+ cb(probs, "ffn_moe_probs", il);
+
+ // select experts
+ ggml_tensor * selected_experts = ggml_top_k(ctx0, probs, n_expert_used); // [n_tokens, num_experts_per_tok]
+ cb(selected_experts->src[0], "ffn_moe_argsort", il);
+
+ ggml_tensor * weights = ggml_get_rows(ctx0,
+ ggml_reshape_3d(ctx0, probs, 1, n_expert, n_tokens), selected_experts);
+ cb(weights, "ffn_moe_weights", il);
+
+ weights = ggml_reshape_2d(ctx0, weights, n_expert_used, n_tokens); // [n_tokens, num_experts_per_tok]
+
+ ggml_tensor * weights_sum = ggml_sum_rows(ctx0, weights);
+ cb(weights_sum, "ffn_moe_weights_sum", il);
+
+ weights = ggml_div(ctx0, weights, weights_sum); // [n_tokens, num_experts_per_tok]
+ cb(weights, "ffn_moe_weights_norm", il);
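+                // the probabilities of the selected experts are renormalized so that they sum to 1 per token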
+
+ // compute expert outputs
+ ggml_tensor * moe_out = nullptr;
+
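+                // ggml_mul_mat_id applies, for each token, the weight matrix of the expert chosen in
+                // slot i of selected_experts for that token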
+ for (int i = 0; i < n_expert_used; ++i) {
+ ggml_tensor * cur_expert;
+
+ ggml_tensor * cur_up = ggml_mul_mat_id(ctx0, model.layers[il].ffn_up_exp, n_expert, selected_experts, i, cur);
+ cb(cur_up, "ffn_moe_up", il);
+
+ ggml_tensor * cur_gate = ggml_mul_mat_id(ctx0, model.layers[il].ffn_gate_exp, n_expert, selected_experts, i, cur);
+ cb(cur_gate, "ffn_moe_gate", il);
+
+ cur_gate = ggml_silu(ctx0, cur_gate);
+ cb(cur_gate, "ffn_moe_silu", il);
+
+ cur_expert = ggml_mul(ctx0, cur_up, cur_gate); // [n_tokens, n_embd]
+ cb(cur_expert, "ffn_moe_gate_par", il);
+
+ cur_expert = ggml_mul_mat_id(ctx0, model.layers[il].ffn_down_exp, n_expert, selected_experts, i, cur_expert); // [n_tokens, n_embd]
+ cb(cur_expert, "ffn_moe_down", il);
+
+ cur_expert = ggml_mul(ctx0, cur_expert,
+ ggml_view_2d(ctx0, weights, 1, n_tokens, weights->nb[1], i*weights->nb[0]));
+ cb(cur_expert, "ffn_moe_weighted", il);
+
+ if (i == 0) {
+ moe_out = cur_expert;
+ } else {
+ moe_out = ggml_add(ctx0, moe_out, cur_expert);
+ cb(moe_out, "ffn_moe_out", il);
+ }
+ }
+
+ cur = moe_out;
}
cur = ggml_add(ctx0, cur, ffn_inp);
}
struct ggml_cgraph * build_baichuan() {
- struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
cb(inp_pos, "inp_pos", -1);
- // KQ_scale
- struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- cb(KQ_scale, "KQ_scale", -1);
-
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
cb(KQ_mask, "KQ_mask", -1);
// apply ALiBi for 13B model
const float max_alibi_bias = model.type == MODEL_13B ? 8.0f : -1.0f;
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, NULL,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, cb, il);
+ Qcur, KQ_mask, n_ctx, n_tokens, n_kv, max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
}
struct ggml_cgraph * build_falcon() {
- struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
cb(inp_pos, "inp_pos", -1);
- // KQ_scale
- struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- cb(KQ_scale, "KQ_scale", -1);
-
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
cb(KQ_mask, "KQ_mask", -1);
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, NULL,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
}
struct ggml_cgraph * build_starcoder() {
- struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
struct ggml_tensor * cur;
struct ggml_tensor * pos;
struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
cb(inp_pos, "inp_pos", -1);
- // KQ_scale
- struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- cb(KQ_scale, "KQ_scale", -1);
-
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
cb(KQ_mask, "KQ_mask", -1);
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, model.layers[il].bo,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
}
struct ggml_cgraph * build_persimmon() {
- struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
const int64_t n_rot = n_embd_head / 2;
inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
cb(inpL, "imp_embd", -1);
+ // inp_pos - contains the positions
struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
cb(inp_pos, "inp_pos", -1);
- // KQ_scale
- struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- cb(KQ_scale, "KQ_scale", -1);
-
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
cb(KQ_mask, "KQ_mask", -1);
struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass);
cb(Kcur, "Kcur", il);
- struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 1, 2, 0, 3));
+ struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 2, 1, 0, 3));
cb(Q, "Q", il);
Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3));
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
// TODO: not tested, could be broken
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, model.layers[il].bo,
- Q, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, cb, il);
+ Q, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
}
struct ggml_cgraph * build_refact() {
- struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
cb(inpL, "inp_embd", -1);
- // KQ_scale
- struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- cb(KQ_scale, "KQ_scale", -1);
-
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
cb(KQ_mask, "KQ_mask", -1);
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, NULL,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il);
+ Qcur, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
}
struct ggml_cgraph * build_bloom() {
- struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
cb(inpL, "inp_embd", -1);
- // KQ_scale
- struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- cb(KQ_scale, "KQ_scale", -1);
-
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
cb(KQ_mask, "KQ_mask", -1);
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, model.layers[il].bo,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, cb, il);
+ Qcur, KQ_mask, n_ctx, n_tokens, n_kv, 8.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
}
struct ggml_cgraph * build_mpt() {
- struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
cb(inpL, "inp_embd", -1);
- // KQ_scale
- struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
- cb(KQ_scale, "KQ_scale", -1);
-
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
cb(KQ_mask, "KQ_mask", -1);
llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
- cur = llm_build_kqv(ctx0, hparams, kv_self,
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
model.layers[il].wo, NULL,
- Qcur, KQ_scale, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, cb, il);
+ Qcur, KQ_mask, n_ctx, n_tokens, n_kv, hparams.f_max_alibi_bias, 1.0f/sqrtf(float(n_embd_head)), cb, il);
cb(cur, "kqv_out", il);
}
cur = llm_build_ffn(ctx0, cur,
model.layers[il].ffn_up, NULL,
NULL, NULL,
- model.layers[il].ffn_down, NULL,
+ model.layers[il].ffn_down, NULL,
+ LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
+ cb(cur, "ffn_out", il);
+ }
+
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
+ }
+
+ cur = inpL;
+
+ cur = llm_build_norm(ctx0, cur, hparams,
+ model.output_norm,
+ NULL,
+ LLM_NORM, cb, -1);
+ cb(cur, "result_norm", -1);
+
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output", -1);
+
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
+ }
+
+ struct ggml_cgraph * build_stablelm() {
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
+
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "inp_embd", -1);
+
+ // inp_pos - contains the positions
+ struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ cb(inp_pos, "inp_pos", -1);
+
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
+
+ // shift the entire K-cache if needed
+ if (do_rope_shift) {
+ llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, hparams.n_rot, freq_base, freq_scale, cb);
+ }
+
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * inpSA = inpL;
+
+ // norm
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm,
+ model.layers[il].attn_norm_b,
+ LLM_NORM, cb, il);
+ cb(cur, "attn_norm", il);
+
+ // self-attention
+ {
+ // compute Q and K and RoPE them
+ struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
+ cb(Qcur, "Qcur", il);
+
+ struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
+ cb(Kcur, "Kcur", il);
+
+ struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
+ cb(Vcur, "Vcur", il);
+
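+                // NEOX-style RoPE applied to the first n_rot dimensions of each head (partial rotary embeddings)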
+ Qcur = ggml_rope_custom(
+ ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
+ hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Qcur, "Qcur", il);
+
+ Kcur = ggml_rope_custom(
+ ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
+ hparams.n_rot, 2, 0, n_orig_ctx, freq_base, freq_scale,
+ ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Kcur, "Kcur", il);
+
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
+
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
+ model.layers[il].wo, NULL,
+ Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+ cb(cur, "kqv_out", il);
+ }
+
+ struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+ cb(ffn_inp, "ffn_inp", il);
+
+ // feed-forward network
+ {
+ cur = llm_build_norm(ctx0, ffn_inp, hparams,
+ model.layers[il].ffn_norm,
+ model.layers[il].ffn_norm_b,
+ LLM_NORM, cb, il);
+ cb(cur, "ffn_norm", il);
+
+ cur = llm_build_ffn(ctx0, cur,
+ model.layers[il].ffn_up, NULL,
+ model.layers[il].ffn_gate, NULL,
+ model.layers[il].ffn_down, NULL,
+ LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+ cb(cur, "ffn_out", il);
+ }
+
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
+ }
+
+ cur = inpL;
+
+ cur = llm_build_norm(ctx0, cur, hparams,
+ model.output_norm,
+ model.output_norm_b,
+ LLM_NORM, cb, -1);
+ cb(cur, "result_norm", -1);
+
+ // lm_head
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output", -1);
+
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
+ }
+
+ struct ggml_cgraph * build_qwen() {
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
+
+ struct ggml_tensor * cur;
+ struct ggml_tensor * inpL;
+
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "inp_embd", -1);
+
+ // inp_pos - contains the positions
+ struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ cb(inp_pos, "inp_pos", -1);
+
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
+
+ // shift the entire K-cache if needed
+ if (do_rope_shift) {
+ llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
+ }
+
+ for (int il = 0; il < n_layer; ++il) {
+ struct ggml_tensor * inpSA = inpL;
+
+ cur = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm, NULL,
+ LLM_NORM_RMS, cb, il);
+ cb(cur, "attn_norm", il);
+
+ // self-attention
+ {
+ cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
+ cb(cur, "wqkv", il);
+
+ cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
+ cb(cur, "bqkv", il);
+
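+                // split the fused QKV projection output into Q, K and V views (each n_embd columns wide)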
+ struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 2*sizeof(float)*(n_embd)));
+
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
+
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+ Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+
+ // using mode = 2 for neox mode
+ Qcur = ggml_rope_custom(
+ ctx0, Qcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
+ freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Qcur, "Qcur", il);
+
+ Kcur = ggml_rope_custom(
+ ctx0, Kcur, inp_pos, n_embd_head, 2, 0, n_orig_ctx,
+ freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Kcur, "Kcur", il);
+
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
+
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
+ model.layers[il].wo, NULL,
+ Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+ cb(cur, "kqv_out", il);
+ }
+
+ struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+ cb(ffn_inp, "ffn_inp", il);
+
+            // feed-forward network
+ {
+ cur = llm_build_norm(ctx0, ffn_inp, hparams,
+ model.layers[il].ffn_norm, NULL,
+ LLM_NORM_RMS, cb, il);
+ cb(cur, "ffn_norm", il);
+
+ cur = llm_build_ffn(ctx0, cur,
+ model.layers[il].ffn_up, NULL,
+ model.layers[il].ffn_gate, NULL,
+ model.layers[il].ffn_down, NULL,
+ LLM_FFN_SILU, LLM_FFN_PAR, cb, il);
+ cb(cur, "ffn_out", il);
+ }
+
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
+ }
+
+ cur = inpL;
+
+ cur = llm_build_norm(ctx0, cur, hparams,
+ model.output_norm, NULL,
+ LLM_NORM_RMS, cb, -1);
+ cb(cur, "result_norm", -1);
+
+ // lm_head
+ cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output", -1);
+
+ ggml_build_forward_expand(gf, cur);
+
+ return gf;
+ }
+ struct ggml_cgraph * build_phi2() {
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
+
+ struct ggml_tensor * cur;
+ struct ggml_tensor * attn_norm_output;
+ struct ggml_tensor * ffn_output;
+ struct ggml_tensor * inpL;
+
+ inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, cb);
+ cb(inpL, "inp_embd", -1);
+
+ // inp_pos - contains the positions
+ struct ggml_tensor * inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ cb(inp_pos, "inp_pos", -1);
+
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+ struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+ cb(KQ_mask, "KQ_mask", -1);
+
+ // shift the entire K-cache if needed
+ if (do_rope_shift) {
+ llm_build_k_shift(ctx0, hparams, cparams, kv_self, gf, LLM_ROPE_NEOX, n_ctx, n_embd_head, freq_base, freq_scale, cb);
+ }
+
+ for (int il = 0; il < n_layer; ++il) {
+ attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
+ model.layers[il].attn_norm,
+ model.layers[il].attn_norm_b,
+ LLM_NORM, cb, il);
+ cb(attn_norm_output, "attn_norm", il);
+
+ // self-attention
+ {
+ cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, attn_norm_output);
+ cb(cur, "wqkv", il);
+
+ cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
+ cb(cur, "bqkv", il);
+
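+                // split the fused QKV output: Q is n_embd wide, K and V are n_embd_gqa wide (grouped-query attention)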
+ struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd)));
+ struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa)));
+
+ cb(Qcur, "Qcur", il);
+ cb(Kcur, "Kcur", il);
+ cb(Vcur, "Vcur", il);
+
+ Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+ Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+
+ Qcur = ggml_rope_custom(
+ ctx0, Qcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
+ freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Qcur, "Qcur", il);
+
+ // with phi2, we scale the Q to avoid precision issues
+ // ref: https://github.com/ml-explore/mlx-examples/blob/08e862336ade809bc37d1035f94b359e7d1a5152/phi2/phi2.py#L64-L66
+ Qcur = ggml_scale(ctx0, Qcur, 1.0f/sqrtf(float(n_embd_head)));
+ cb(Qcur, "Qcur", il);
+
+ Kcur = ggml_rope_custom(
+ ctx0, Kcur, inp_pos, hparams.n_rot, 2, 0, n_orig_ctx,
+ freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow
+ );
+ cb(Kcur, "Kcur", il);
+
+ llm_build_kv_store(ctx0, hparams, kv_self, gf, Kcur, Vcur, n_ctx, n_tokens, kv_head, cb, il);
+
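+                // Q was already scaled above, so pass 1.0f as the KQ scale here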
+ cur = llm_build_kqv(ctx0, model, hparams, kv_self,
+ model.layers[il].wo, model.layers[il].bo,
+ Qcur, KQ_mask, n_ctx, n_tokens, n_kv, -1.0f, 1.0f, cb, il);
+ cb(cur, "kqv_out", il);
+ }
+
+ // FF
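+            // phi2 parallel residual: the FFN reads attn_norm_output (not the attention output); both branch outputs are summed with the layer input below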
+ {
+ ffn_output = llm_build_ffn(ctx0, attn_norm_output,
+ model.layers[il].ffn_up, model.layers[il].ffn_up_b,
+ NULL, NULL,
+ model.layers[il].ffn_down, model.layers[il].ffn_down_b,
LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
- cb(cur, "ffn_out", il);
+ cb(ffn_output, "ffn_out", il);
}
- cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = ggml_add(ctx0, cur, ffn_output);
+ cb(cur, "l_out", il);
+
+ cur = ggml_add(ctx0, cur, inpL);
cb(cur, "l_out", il);
- // input for next layer
inpL = cur;
}
- cur = inpL;
-
- cur = llm_build_norm(ctx0, cur, hparams,
+ cur = llm_build_norm(ctx0, inpL, hparams,
model.output_norm,
- NULL,
+ model.output_norm_b,
LLM_NORM, cb, -1);
cb(cur, "result_norm", -1);
cur = ggml_mul_mat(ctx0, model.output, cur);
+ cb(cur, "result_output_no_bias", -1);
+
+ cur = ggml_add(ctx0, cur, model.output_b);
cb(cur, "result_output", -1);
ggml_build_forward_expand(gf, cur);
enum llm_offload_func_e {
OFFLOAD_FUNC_NOP,
OFFLOAD_FUNC,
- OFFLOAD_FUNC_KQ,
- OFFLOAD_FUNC_V,
+ OFFLOAD_FUNC_FRC, // force offload
+ OFFLOAD_FUNC_KQV,
OFFLOAD_FUNC_NR,
- OFFLOAD_FUNC_EMB,
+ OFFLOAD_FUNC_EMB, // embeddings
OFFLOAD_FUNC_OUT,
};
//{ "inp_embd", OFFLOAD_FUNC_NR }, // TODO: missing K-quants get_rows kernel
{ "pos_embd", OFFLOAD_FUNC_NR },
- { "inp_pos", OFFLOAD_FUNC_KQ }, // this is often used for KQ ops (e.g. rope)
- { "KQ_scale", OFFLOAD_FUNC_KQ },
- { "KQ_mask", OFFLOAD_FUNC_KQ },
- { "K_shift", OFFLOAD_FUNC_KQ },
- { "K_shifted", OFFLOAD_FUNC_KQ },
+ { "inp_pos", OFFLOAD_FUNC_FRC }, // this is often used for KQ ops (e.g. rope)
+ { "KQ_mask", OFFLOAD_FUNC_FRC },
+ { "K_shift", OFFLOAD_FUNC_FRC },
+
+ { "K_shifted", OFFLOAD_FUNC },
{ "inp_norm", OFFLOAD_FUNC_NR },
{ "inp_norm_w", OFFLOAD_FUNC_NR },
{ "attn_norm", OFFLOAD_FUNC },
{ "attn_norm_2", OFFLOAD_FUNC },
- { "wqkv", OFFLOAD_FUNC_KQ },
- { "bqkv", OFFLOAD_FUNC_KQ },
- { "wqkv_clamped", OFFLOAD_FUNC_KQ },
-
- { "tmpk", OFFLOAD_FUNC_KQ },
- { "tmpq", OFFLOAD_FUNC_KQ },
- { "tmpv", OFFLOAD_FUNC_V },
- { "Kcur", OFFLOAD_FUNC_KQ },
- { "Qcur", OFFLOAD_FUNC_KQ },
- { "Vcur", OFFLOAD_FUNC_V },
-
- { "krot", OFFLOAD_FUNC_KQ },
- { "qrot", OFFLOAD_FUNC_KQ },
- { "kpass", OFFLOAD_FUNC_KQ },
- { "qpass", OFFLOAD_FUNC_KQ },
- { "krotated", OFFLOAD_FUNC_KQ },
- { "qrotated", OFFLOAD_FUNC_KQ },
-
- { "q", OFFLOAD_FUNC_KQ },
- { "k", OFFLOAD_FUNC_KQ },
- { "kq", OFFLOAD_FUNC_KQ },
- { "kq_scaled", OFFLOAD_FUNC_KQ },
- { "kq_scaled_alibi", OFFLOAD_FUNC_KQ },
- { "kq_masked", OFFLOAD_FUNC_KQ },
- { "kq_soft_max", OFFLOAD_FUNC_V },
- { "v", OFFLOAD_FUNC_V },
- { "kqv", OFFLOAD_FUNC_V },
- { "kqv_merged", OFFLOAD_FUNC_V },
- { "kqv_merged_cont", OFFLOAD_FUNC_V },
- { "kqv_wo", OFFLOAD_FUNC_V },
- { "kqv_out", OFFLOAD_FUNC_V },
+ { "wqkv", OFFLOAD_FUNC_KQV },
+ { "bqkv", OFFLOAD_FUNC_KQV },
+ { "wqkv_clamped", OFFLOAD_FUNC_KQV },
+
+ { "tmpk", OFFLOAD_FUNC_KQV },
+ { "tmpq", OFFLOAD_FUNC_KQV },
+ { "tmpv", OFFLOAD_FUNC_KQV },
+ { "Kcur", OFFLOAD_FUNC_KQV },
+ { "Qcur", OFFLOAD_FUNC_KQV },
+ { "Vcur", OFFLOAD_FUNC_KQV },
+
+ { "krot", OFFLOAD_FUNC_KQV },
+ { "qrot", OFFLOAD_FUNC_KQV },
+ { "kpass", OFFLOAD_FUNC_KQV },
+ { "qpass", OFFLOAD_FUNC_KQV },
+ { "krotated", OFFLOAD_FUNC_KQV },
+ { "qrotated", OFFLOAD_FUNC_KQV },
+
+ { "q", OFFLOAD_FUNC_KQV },
+ { "k", OFFLOAD_FUNC_KQV },
+ { "kq", OFFLOAD_FUNC_KQV },
+ { "kq_scaled", OFFLOAD_FUNC_KQV },
+ { "kq_scaled_alibi", OFFLOAD_FUNC_KQV },
+ { "kq_masked", OFFLOAD_FUNC_KQV },
+ { "kq_soft_max", OFFLOAD_FUNC_KQV },
+ { "kq_soft_max_ext", OFFLOAD_FUNC_KQV },
+ { "v", OFFLOAD_FUNC_KQV },
+ { "kqv", OFFLOAD_FUNC_KQV },
+ { "kqv_merged", OFFLOAD_FUNC_KQV },
+ { "kqv_merged_cont", OFFLOAD_FUNC_KQV },
+ { "kqv_wo", OFFLOAD_FUNC_KQV },
+ { "kqv_out", OFFLOAD_FUNC_KQV },
{ "ffn_inp", OFFLOAD_FUNC },
{ "ffn_norm", OFFLOAD_FUNC },
{ "ffn_relu", OFFLOAD_FUNC },
{ "ffn_sqr(relu)", OFFLOAD_FUNC },
+ { "ffn_moe_logits", OFFLOAD_FUNC },
+ { "ffn_moe_probs", OFFLOAD_FUNC },
+ { "ffn_moe_argsort", OFFLOAD_FUNC },
+ { "ffn_moe_weights", OFFLOAD_FUNC },
+ { "ffn_moe_weights_sum", OFFLOAD_FUNC },
+ { "ffn_moe_weights_norm", OFFLOAD_FUNC },
+ { "ffn_moe_weighted", OFFLOAD_FUNC },
+ { "ffn_moe_up", OFFLOAD_FUNC },
+ { "ffn_moe_gate", OFFLOAD_FUNC },
+ { "ffn_moe_silu", OFFLOAD_FUNC },
+ { "ffn_moe_gate_par", OFFLOAD_FUNC },
+ { "ffn_moe_down", OFFLOAD_FUNC },
+ { "ffn_moe_out", OFFLOAD_FUNC },
+
{ "l_out", OFFLOAD_FUNC },
{ "result_norm", OFFLOAD_FUNC_EMB },
+ { "result_output_no_bias", OFFLOAD_FUNC_EMB },
{ "result_output", OFFLOAD_FUNC_OUT },
};
bool alloc_inp_tokens = false;
bool alloc_inp_embd = false;
bool alloc_inp_pos = false;
- bool alloc_inp_KQ_scale = false;
bool alloc_inp_KQ_mask = false;
bool alloc_inp_K_shift = false;
-#ifdef GGML_USE_CUBLAS
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
const bool do_offload = true;
#else
const bool do_offload = true; // TODO: set to false after finishing refactoring
if (!ggml_allocr_is_measure(lctx.alloc) && batch.token) {
const int64_t n_tokens = cur->ne[0];
- memcpy(cur->data, batch.token, n_tokens*ggml_element_size(cur));
+ ggml_backend_tensor_set(cur, batch.token, 0, n_tokens*ggml_element_size(cur));
}
alloc_inp_tokens = true;
const int64_t n_embd = cur->ne[0];
const int64_t n_tokens = cur->ne[1];
- memcpy(cur->data, batch.embd, n_tokens*n_embd*ggml_element_size(cur));
+ ggml_backend_tensor_set(cur, batch.embd, 0, n_tokens*n_embd*ggml_element_size(cur));
}
alloc_inp_embd = true;
if (!ggml_allocr_is_measure(lctx.alloc) && batch.pos) {
const int64_t n_tokens = cur->ne[0];
- int32_t * data = (int32_t *) cur->data;
-
- for (int i = 0; i < n_tokens; ++i) {
- data[i] = batch.pos[i];
- }
+ static_assert(std::is_same<llama_pos, int32_t>::value, "llama_pos must be int32_t");
+ ggml_backend_tensor_set(cur, batch.pos, 0, n_tokens*ggml_element_size(cur));
}
alloc_inp_pos = true;
}
- if (!alloc_inp_KQ_scale && strcmp(name, "KQ_scale") == 0) {
- ggml_allocr_alloc(lctx.alloc, cur);
-
- if (!ggml_allocr_is_measure(lctx.alloc)) {
- const int64_t n_embd_head = model.hparams.n_embd_head();
- ggml_set_f32(cur, 1.0f/sqrtf(float(n_embd_head)));
- }
-
- alloc_inp_KQ_scale = true;
- }
-
if (!alloc_inp_KQ_mask && strcmp(name, "KQ_mask") == 0) {
ggml_allocr_alloc(lctx.alloc, cur);
const int64_t n_kv = cur->ne[0];
const int64_t n_tokens = cur->ne[1];
- float * data = (float *) cur->data;
- memset(data, 0, ggml_nbytes(cur));
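+            // write directly into the tensor when its buffer is host-accessible; otherwise stage the mask in buf_copy and upload it with ggml_backend_tensor_set below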
+ float * data;
+ if (ggml_backend_buffer_is_host(cur->buffer)) {
+ data = (float *) cur->data;
+ } else {
+ lctx.buf_copy.resize(ggml_nbytes(cur));
+ data = (float *) lctx.buf_copy.data();
+ }
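+            // causal mask: a cell is masked (-INF) if it belongs to a different sequence or lies in the future relative to the current token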
for (int h = 0; h < 1; ++h) {
for (int j = 0; j < n_tokens; ++j) {
const llama_seq_id seq_id = batch.seq_id[j][0];
for (int i = 0; i < n_kv; ++i) {
+ float f;
if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
- data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY;
+ f = -INFINITY;
+ } else {
+ f = 0;
}
+ data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
}
}
}
+
+ if (data != cur->data) {
+ ggml_backend_tensor_set(cur, data, 0, ggml_nbytes(cur));
+ }
}
alloc_inp_KQ_mask = true;
if (!ggml_allocr_is_measure(lctx.alloc)) {
const int64_t n_ctx = cur->ne[0];
- int32_t * data = (int32_t *) cur->data;
+ int32_t * data;
+ if (ggml_backend_buffer_is_host(cur->buffer)) {
+ data = (int32_t *) cur->data;
+ } else {
+ lctx.buf_copy.resize(ggml_nbytes(cur));
+ data = (int32_t *) lctx.buf_copy.data();
+ }
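+            // each entry is the accumulated position delta for that cell, applied to the K-cache by the shift graph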
for (int i = 0; i < n_ctx; ++i) {
data[i] = lctx.kv_self.cells[i].delta;
}
+
+ if (data != cur->data) {
+ ggml_backend_tensor_set(cur, data, 0, ggml_nbytes(cur));
+ }
}
alloc_inp_K_shift = true;
static const std::unordered_map<llm_offload_func_e, std::string, std::hash<int>> k_offload_func_name = {
{ OFFLOAD_FUNC_NOP, "CPU" },
{ OFFLOAD_FUNC_OUT, "CPU" },
-#ifdef GGML_USE_CUBLAS
- { OFFLOAD_FUNC, "GPU (CUDA)" },
- { OFFLOAD_FUNC_KQ, "GPU (CUDA) KQ" },
- { OFFLOAD_FUNC_V, "GPU (CUDA) V" },
- { OFFLOAD_FUNC_NR, "GPU (CUDA) NR" },
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ { OFFLOAD_FUNC, "GPU (CUDA)" },
+ { OFFLOAD_FUNC_FRC, "GPU (CUDA) FRC" },
+ { OFFLOAD_FUNC_KQV, "GPU (CUDA) KQV" },
+ { OFFLOAD_FUNC_NR, "GPU (CUDA) NR" },
{ OFFLOAD_FUNC_EMB, "GPU (CUDA) EMB" },
#else
{ OFFLOAD_FUNC, "CPU" },
- { OFFLOAD_FUNC_KQ, "CPU" },
- { OFFLOAD_FUNC_V, "CPU" },
+ { OFFLOAD_FUNC_FRC, "CPU" },
+ { OFFLOAD_FUNC_KQV, "CPU" },
{ OFFLOAD_FUNC_NR, "CPU" },
{ OFFLOAD_FUNC_EMB, "CPU" },
#endif // GGML_USE_CUBLAS
}
}
break;
- case OFFLOAD_FUNC_NR:
- if (n_gpu_layers <= n_layer + 0) {
+ case OFFLOAD_FUNC_FRC:
+ if (!lctx.cparams.offload_kqv) {
func_e = OFFLOAD_FUNC_NOP;
- }
- break;
- case OFFLOAD_FUNC_V:
- if (n_gpu_layers <= n_layer + 1) {
+ } break;
+ case OFFLOAD_FUNC_KQV:
+ if (!lctx.cparams.offload_kqv) {
func_e = OFFLOAD_FUNC_NOP;
+ } else {
+ if (n_gpu_layers < n_layer) {
+ if (il < i_gpu_start) {
+ func_e = OFFLOAD_FUNC_NOP;
+ }
+ }
}
break;
- case OFFLOAD_FUNC_KQ:
- if (n_gpu_layers <= n_layer + 2) {
+ case OFFLOAD_FUNC_NR:
+ if (n_gpu_layers <= n_layer + 0) {
func_e = OFFLOAD_FUNC_NOP;
}
break;
offload_func_t func = ggml_offload_nop;
// this is needed for compatibility with Metal for example
-#ifdef GGML_USE_CUBLAS
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
static offload_func_t ggml_offload_gpu = ggml_cuda_assign_buffers_no_alloc;
#else
static offload_func_t ggml_offload_gpu = ggml_offload_nop;
case OFFLOAD_FUNC_NOP:
case OFFLOAD_FUNC_OUT: func = ggml_offload_nop; break;
case OFFLOAD_FUNC:
- case OFFLOAD_FUNC_KQ:
- case OFFLOAD_FUNC_V:
+ case OFFLOAD_FUNC_KQV:
+ case OFFLOAD_FUNC_FRC:
case OFFLOAD_FUNC_NR:
case OFFLOAD_FUNC_EMB: func = ggml_offload_gpu; break;
default: GGML_ASSERT(false);
{
result = llm.build_mpt();
} break;
+ case LLM_ARCH_STABLELM:
+ {
+ result = llm.build_stablelm();
+ } break;
+ case LLM_ARCH_QWEN:
+ {
+ result = llm.build_qwen();
+ } break;
+ case LLM_ARCH_PHI2:
+ {
+ result = llm.build_phi2();
+ } break;
default:
GGML_ASSERT(false);
}
const int64_t n_embd = hparams.n_embd;
const int64_t n_vocab = hparams.n_vocab;
- // helpers for smoother batch API transistion
+ // helpers for smoother batch API transition
// after deprecating the llama_eval calls, these will be removed
std::vector<llama_pos> pos;
batch.seq_id = seq_id_arr.data();
}
+ // if we have enough unused cells before the current head ->
+ // better to start searching from the beginning of the cache, hoping to fill it
+ if (kv_self.head > kv_self.used + 2*n_tokens) {
+ kv_self.head = 0;
+ }
+
if (!llama_kv_cache_find_slot(kv_self, batch)) {
return 1;
}
// a heuristic, to avoid attending the full cache if it is not yet utilized
// after enough generations, the benefit from this heuristic disappears
// if we start defragmenting the cache, the benefit from this will be more important
- //kv_self.n = std::max(32, GGML_PAD(llama_kv_cache_cell_max(kv_self), 32)); // TODO: this might be better for CUDA?
- kv_self.n = std::min((int32_t) cparams.n_ctx, std::max(32, llama_kv_cache_cell_max(kv_self)));
+ kv_self.n = std::min((int32_t) cparams.n_ctx, std::max(32, GGML_PAD(llama_kv_cache_cell_max(kv_self), 32)));
+ //kv_self.n = llama_kv_cache_cell_max(kv_self);
- //printf("kv_self.n = %d\n", kv_self.n);
+ //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);
ggml_allocr_reset(lctx.alloc);
ggml_allocr_alloc_graph(lctx.alloc, gf);
- struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
- struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
-
- GGML_ASSERT(strcmp(res->name, "result_output") == 0);
- GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
+ // the output is always the last tensor in the graph
+ struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
+ GGML_ASSERT(strcmp(res->name, "result_output") == 0);
+ // the embeddings could be the second to last tensor, or the third to last tensor
+ struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
+ if (strcmp(embeddings->name, "result_norm") != 0) {
+ embeddings = gf->nodes[gf->n_nodes - 3];
+ GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
+ }
-#ifdef GGML_USE_CUBLAS
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ char * buf_alloc_base = (char *)ggml_backend_buffer_get_base(lctx.buf_alloc);
for (int i = 0; i < gf->n_leafs; i++) {
ggml_tensor * node = gf->leafs[i];
if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
- ggml_cuda_assign_scratch_offset(node, (char*)node->data - (char *) lctx.buf_alloc.data);
+ ggml_cuda_assign_scratch_offset(node, (char *)node->data - buf_alloc_base);
ggml_cuda_copy_to_device(node);
}
}
for (int i = 0; i < gf->n_nodes; i++) {
ggml_tensor * node = gf->nodes[i];
if (node->backend == GGML_BACKEND_GPU && node->extra == NULL) {
- ggml_cuda_assign_scratch_offset(node, (char*)node->data - (char *) lctx.buf_alloc.data);
+ ggml_cuda_assign_scratch_offset(node, (char *)node->data - buf_alloc_base);
}
}
n_threads = std::min(4, n_threads);
}
- // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
- const bool full_offload_supported =
- model.arch == LLM_ARCH_LLAMA ||
- model.arch == LLM_ARCH_BAICHUAN ||
- model.arch == LLM_ARCH_FALCON ||
- model.arch == LLM_ARCH_REFACT ||
- model.arch == LLM_ARCH_MPT;
-
- const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
- if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
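+    // when the model is fully offloaded to the GPU, extra CPU threads only add overhead - a single thread is enough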
+ const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 1;
+ if (ggml_cpu_has_cublas() && fully_offloaded) {
n_threads = 1;
}
-#if GGML_USE_MPI
+#ifdef GGML_USE_MPI
const int64_t n_layer = hparams.n_layer;
ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
#endif
#ifdef GGML_USE_METAL
- if (lctx.ctx_metal) {
- ggml_metal_set_n_cb (lctx.ctx_metal, n_threads);
- ggml_metal_graph_compute(lctx.ctx_metal, gf);
- } else {
- ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
+ if (ggml_backend_is_metal(lctx.backend)) {
+ ggml_backend_metal_set_n_cb(lctx.backend, n_threads);
}
-#else
- ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
#endif
-#if GGML_USE_MPI
+ if (ggml_backend_is_cpu(lctx.backend)) {
+ ggml_backend_cpu_set_n_threads(lctx.backend, n_threads);
+ }
+ ggml_backend_graph_compute(lctx.backend, gf);
+
+#ifdef GGML_USE_MPI
ggml_mpi_graph_compute_post(lctx.ctx_mpi, gf, n_layer);
#endif
{
auto & logits_out = lctx.logits;
+#ifndef NDEBUG
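+        // debug builds track which rows of the logits buffer were actually written this call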
+ auto & logits_valid = lctx.logits_valid;
+ logits_valid.clear();
+ logits_valid.resize(n_tokens);
+
+ logits_out.clear();
+#endif
+
if (batch.logits) {
logits_out.resize(n_vocab * n_tokens);
for (uint32_t i = 0; i < n_tokens; i++) {
if (batch.logits[i] == 0) {
continue;
}
- memcpy(logits_out.data() + (n_vocab*i), (float *) ggml_get_data(res) + (n_vocab*i), sizeof(float)*n_vocab);
+ ggml_backend_tensor_get(res, logits_out.data() + (n_vocab*i), (n_vocab*i)*sizeof(float), n_vocab*sizeof(float));
+#ifndef NDEBUG
+ logits_valid[i] = true;
+#endif
}
} else if (lctx.logits_all) {
logits_out.resize(n_vocab * n_tokens);
- memcpy(logits_out.data(), (float *) ggml_get_data(res), sizeof(float)*n_vocab*n_tokens);
+ ggml_backend_tensor_get(res, logits_out.data(), 0, n_vocab*n_tokens*sizeof(float));
+#ifndef NDEBUG
+ std::fill(logits_valid.begin(), logits_valid.end(), true);
+#endif
} else {
logits_out.resize(n_vocab);
- memcpy(logits_out.data(), (float *) ggml_get_data(res) + (n_vocab*(n_tokens - 1)), sizeof(float)*n_vocab);
+ ggml_backend_tensor_get(res, logits_out.data(), (n_vocab*(n_tokens - 1))*sizeof(float), n_vocab*sizeof(float));
+#ifndef NDEBUG
+ logits_valid[0] = true;
+#endif
}
}
auto & embedding_out = lctx.embedding;
embedding_out.resize(n_embd);
- memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(n_tokens - 1)), sizeof(float)*n_embd);
+ ggml_backend_tensor_get(embeddings, embedding_out.data(), (n_embd*(n_tokens - 1))*sizeof(float), n_embd*sizeof(float));
}
// measure the performance only for the single-token evals
// loop over the text
while (true) {
- // find the first occurence of a given special token in this fragment
+ // find the first occurrence of a given special token in this fragment
// passing offset argument only limit the "search area" but match coordinates
// are still relative to the source full raw_text
auto match = raw_text->find(special_token, raw_text_base_offset);
- // no occurences found, stop processing this fragment for a given special token
+ // no occurrences found, stop processing this fragment for a given special token
if (match == std::string::npos) break;
// check if match is within bounds of offset <-> length
// by modifying llm_tokenizer_x to operate with string offsets like pre-tokenizer
// and passing 'add space prefix' as bool argument
//
- auto raw_text = (special ? "" : " ") + fragment.raw_text.substr(fragment.offset, fragment.length);
+ auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
+ if (&fragment == &fragment_buffer.front()) {
+ raw_text = " " + raw_text; // prefix with space if the first token is not special
+ }
#ifdef PRETOKENIZERDEBUG
fprintf(stderr,"TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
// Decodes a UTF-8 string which may end in an incomplete sequence. Adds a terminating 0 for use as
// pointer. If an invalid sequence is encountered, returns `llama_partial_utf8.n_remain == -1`.
static std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8(
- const char * src,
+ const std::string & src,
llama_partial_utf8 partial_start) {
static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 3, 4 };
- const char * pos = src;
+ const char * pos = src.c_str();
std::vector<uint32_t> code_points;
+    // common English strings have the same number of code points and bytes; `+ 1` is for the terminating 0.
+ code_points.reserve(src.size() + 1);
uint32_t value = partial_start.value;
int n_remain = partial_start.n_remain;
// Replace the data in candidates with the new_candidates data
std::copy(new_candidates.begin(), new_candidates.end(), candidates->data);
candidates->size = new_candidates.size();
+ candidates->sorted = false;
if (ctx) {
ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
const llama_token eos = llama_token_eos(&ctx->model);
std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
+ candidates_decoded.reserve(candidates->size);
std::vector<llama_grammar_candidate> candidates_grammar;
+ candidates_grammar.reserve(candidates->size);
for (size_t i = 0; i < candidates->size; ++i) {
const llama_token id = candidates->data[i].id;
} else if (piece.empty() || piece[0] == 0) {
candidates->data[i].logit = -INFINITY;
} else {
- candidates_decoded.push_back(decode_utf8(piece.c_str(), grammar->partial_utf8));
+ candidates_decoded.push_back(decode_utf8(piece, grammar->partial_utf8));
candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second });
}
}
const std::string piece = llama_token_to_piece(ctx, token);
// Note terminating 0 in decoded string
- const auto decoded = decode_utf8(piece.c_str(), grammar->partial_utf8);
+ const auto decoded = decode_utf8(piece, grammar->partial_utf8);
const auto & code_points = decoded.first;
for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) {
grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it);
}
// Min-heaps are used to efficiently collect the top-k elements (k=n_beams).
- // The repetative patterns below reflect the 2 stages of heaps:
+ // The repetitive patterns below reflect the 2 stages of heaps:
// * Gather elements until the vector is full, then call std::make_heap() on it.
// * If the heap is full and a new element is found that should be included, pop the
// least element to the back(), replace it with the new, then push it into the heap.
// quantization
//
-template <typename T>
-struct no_init {
- T value;
- no_init() { /* do nothing */ }
-};
-
struct quantize_state_internal {
const llama_model & model;
const llama_model_quantize_params * params;
return;
}
- auto block_size = tensor->type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor->type);
- auto block_size_bytes = ggml_type_size(tensor->type);
+ size_t block_size = tensor->type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor->type);
+ size_t block_size_bytes = ggml_type_size(tensor->type);
GGML_ASSERT(nelements % block_size == 0);
- auto nblocks = nelements / block_size;
- auto blocks_per_thread = nblocks / nthread;
- auto spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
+ size_t nblocks = nelements / block_size;
+ size_t blocks_per_thread = nblocks / nthread;
+ size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
+
+ size_t in_buff_offs = 0;
+ size_t out_buff_offs = 0;
- for (auto tnum = 0, in_buff_offs = 0, out_buff_offs = 0; tnum < nthread; tnum++) {
- auto thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
- auto thr_elems = thr_blocks * block_size; // number of elements for this thread
- auto thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
+ for (int tnum = 0; tnum < nthread; tnum++) {
+ size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
+ size_t thr_elems = thr_blocks * block_size; // number of elements for this thread
+ size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
if (typ == GGML_TYPE_F16) {
workers.clear();
}
-static ggml_type get_k_quant_type(
- quantize_state_internal & qs,
- ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype
-) {
+static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
const std::string name = ggml_get_name(tensor);
+
// TODO: avoid hardcoded tensor names - use the TN_* constants
const llm_arch arch = qs.model.arch;
const auto tn = LLM_TN(arch);
// nearly negligible increase in model size by quantizing this tensor with more bits:
if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
}
+ if (qs.model.hparams.n_expert == 8) {
+ // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+ // TODO: explore better strategies
+ new_type = GGML_TYPE_Q8_0;
+ }
++qs.i_attention_wv;
+ } else if (name.find("attn_k.weight") != std::string::npos) {
+ if (qs.model.hparams.n_expert == 8) {
+ // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+ // TODO: explore better strategies
+ new_type = GGML_TYPE_Q8_0;
+ }
} else if (name.find("ffn_down.weight") != std::string::npos) {
if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
constexpr bool use_mmap = false;
#endif
- llama_model_loader ml(fname_inp, use_mmap);
- if (ml.use_mmap) {
- ml.mapping.reset(new llama_mmap(&ml.file, /* prefetch */ 0, ggml_is_numa()));
- }
+ llama_model_loader ml(fname_inp, use_mmap, NULL);
+    ml.init_mapping(false); // no prefetching
llama_model model;
llm_load_arch(ml, model);
bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
// quantize only 2D tensors
- quantize &= (tensor->n_dims == 2);
+ quantize &= (ggml_n_dims(tensor) == 2);
quantize &= params->quantize_output_tensor || name != "output.weight";
quantize &= !params->only_copy;
+ // do not quantize expert gating tensors
+ quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
+
enum ggml_type new_type;
void * new_data;
size_t new_size;
workers.clear();
}
- LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
+ LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB | hist: ", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
int64_t tot_count = 0;
for (size_t i = 0; i < hist_cur.size(); i++) {
hist_all[i] += hist_cur[i];
const int64_t t_start_lora_us = ggml_time_us();
- auto fin = std::ifstream(path_lora, std::ios::binary);
- if (!fin) {
- LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora);
- return 1;
- }
+ llama_file fin(path_lora, "rb");
// verify magic and version
{
- uint32_t magic;
- fin.read((char *) &magic, sizeof(magic));
- uint32_t format_version;
- fin.read((char *) &format_version, sizeof(format_version));
+ uint32_t magic = fin.read_u32();
+ if (magic != LLAMA_FILE_MAGIC_GGLA) {
+ LLAMA_LOG_ERROR("%s: bad file magic\n", __func__);
+ return 1;
+ }
+ uint32_t format_version = fin.read_u32();
if (format_version != 1) {
LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ );
return 1;
}
}
- int32_t lora_r;
- int32_t lora_alpha;
- fin.read((char *) &lora_r, sizeof(lora_r));
- fin.read((char *) &lora_alpha, sizeof(lora_alpha));
+ int32_t lora_r = fin.read_u32();
+ int32_t lora_alpha = fin.read_u32();
float scaling = scale * (float)lora_alpha / (float)lora_r;
LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling);
+ // create a name -> tensor map of the model to accelerate lookups
+ // find the max tensor size to estimate the required temporary buffer size
+ size_t max_tensor_size = 0;
+ std::unordered_map<std::string, struct ggml_tensor*> model_tensors;
+ for (const auto & kv : model.tensors_by_name) {
+ model_tensors.insert(kv);
+ size_t f32_size = ggml_nelements(kv.second) * sizeof(float);
+ max_tensor_size = std::max(max_tensor_size, f32_size);
+ }
+
// create a temporary ggml context to store the lora tensors
- // todo: calculate size from biggest possible tensor
- std::vector<uint8_t> lora_buf(1024ull * 1024ull * 1024ull);
+ // TODO: use ggml-alloc
+ size_t lora_ctx_size = max_tensor_size * 3;
+ LLAMA_LOG_INFO("%s: allocating %.f MB for lora temporary buffer\n", __func__, lora_ctx_size / 1024.0 / 1024.0);
+ std::vector<uint8_t> lora_buf(lora_ctx_size);
+
struct ggml_init_params params;
params.mem_size = lora_buf.size();
params.mem_buffer = lora_buf.data();
params.no_alloc = false;
- ggml_context * lora_ctx = ggml_init(params);
- std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;
+ using unique_context = std::unique_ptr<ggml_context, decltype(&ggml_free)>;
- // create a name -> tensor map of the model to accelerate lookups
- std::unordered_map<std::string, struct ggml_tensor*> model_tensors;
- for (const auto & kv : model.tensors_by_name) {
- model_tensors.insert(kv);
- }
+ unique_context lora_ctx(nullptr, ggml_free);
+ lora_ctx.reset(ggml_init(params));
+ std::unordered_map<std::string, struct ggml_tensor *> lora_tensors;
// load base model
std::unique_ptr<llama_model_loader> ml;
- ggml_context * base_ctx = NULL;
- std::vector<uint8_t> base_buf;
- if (path_base_model) {
- LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
- ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true));
-
- size_t ctx_size;
- size_t mmapped_size;
- ml->calc_sizes(ctx_size, mmapped_size);
- base_buf.resize(ctx_size);
- ggml_init_params base_params;
- base_params.mem_size = base_buf.size();
- base_params.mem_buffer = base_buf.data();
- base_params.no_alloc = ml->use_mmap;
-
- base_ctx = ggml_init(base_params);
-
- // maybe this should in llama_model_loader
- if (ml->use_mmap) {
- ml->mapping.reset(new llama_mmap(&ml->file, /* prefetch */ 0, ggml_is_numa()));
- }
+ if (path_base_model) {
+ LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model);
+ ml.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true, /*kv_overrides*/ nullptr));
+ ml->init_mapping(false); // no prefetching
}
// read tensors and apply
std::vector<uint8_t> work_buffer;
while (true) {
+ if (fin.tell() == fin.size) {
+ // eof
+ break;
+ }
+
int32_t n_dims;
- int32_t length;
+ int32_t name_len;
int32_t ftype;
- fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
- fin.read(reinterpret_cast<char *>(&length), sizeof(length));
- fin.read(reinterpret_cast<char *>(&ftype), sizeof(ftype));
- if (fin.eof()) {
- break;
+ fin.read_raw(&n_dims, sizeof(n_dims));
+ fin.read_raw(&name_len, sizeof(name_len));
+ fin.read_raw(&ftype, sizeof(ftype));
+
+ if (n_dims != 1 && n_dims != 2) {
+ LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
+ return 1;
}
int32_t ne[2] = { 1, 1 };
for (int i = 0; i < n_dims; ++i) {
- fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
+ fin.read_raw(&ne[i], sizeof(ne[i]));
}
std::string name;
{
+ GGML_ASSERT(name_len <= 1024);
char buf[1024];
- fin.read(buf, length);
- name = std::string(buf, length);
+ fin.read_raw(buf, name_len);
+ name = std::string(buf, name_len);
}
// check for lora suffix and get the type of tensor
std::string lora_type = name.substr(pos + lora_suffix.length());
std::string base_name = name;
base_name.erase(pos);
- // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(),base_name.c_str(), lora_type.c_str());
+ // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(), base_name.c_str(), lora_type.c_str());
if (model_tensors.find(base_name) == model_tensors.end()) {
LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data());
return false;
}
}
- ggml_tensor * lora_tensor;
- if (n_dims == 2) {
- lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]);
- }
- else {
- LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims);
- return 1;
- }
- ggml_set_name(lora_tensor, "lora_tensor");
+ ggml_tensor * lora_tensor = ggml_new_tensor_2d(lora_ctx.get(), wtype, ne[0], ne[1]);
+ ggml_set_name(lora_tensor, name.c_str());
// load tensor data
- size_t offset = fin.tellg();
+ size_t offset = fin.tell();
size_t tensor_data_size = ggml_nbytes(lora_tensor);
offset = (offset + 31) & -32;
- fin.seekg(offset);
- fin.read((char*)lora_tensor->data, tensor_data_size);
+ fin.seek(offset, SEEK_SET);
+ fin.read_raw(lora_tensor->data, tensor_data_size);
lora_tensors[name] = lora_tensor;
offload_func_t offload_func = ggml_offload_nop;
offload_func_t offload_func_force_inplace = ggml_offload_nop;
-#ifdef GGML_USE_CUBLAS
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) {
if (dest_t->type != GGML_TYPE_F16) {
throw std::runtime_error(format(
// load from base model
if (gguf_find_tensor(ctx_gguf, base_name.c_str()) < 0) {
- // TODO: throw
LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
return 1;
}
- // TODO: not tested!! maybe not working!
- base_t = ml->create_tensor(base_ctx, base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU);
+ base_t = ml->get_tensor_meta(base_name.c_str());
ml->load_data_for(base_t);
} else {
base_t = dest_t;
}
// w = w + BA*s
- ggml_tensor * BA = ggml_mul_mat(lora_ctx, loraA, loraB);
+ ggml_tensor * BA = ggml_mul_mat(lora_ctx.get(), loraA, loraB);
offload_func(BA);
ggml_set_name(BA, "BA");
if (scaling != 1.0f) {
- ggml_tensor * scale_tensor = ggml_new_f32(lora_ctx, scaling);
- ggml_set_name(scale_tensor, "scale_tensor");
-
- BA = ggml_scale_inplace(lora_ctx, BA, scale_tensor);
+ BA = ggml_scale_inplace(lora_ctx.get(), BA, scaling);
offload_func(BA);
ggml_set_name(BA, "BA_scaled");
}
ggml_tensor * r;
if (base_t == dest_t) {
- r = ggml_add_inplace(lora_ctx, dest_t, BA);
+ r = ggml_add_inplace(lora_ctx.get(), dest_t, BA);
offload_func_force_inplace(r);
ggml_set_name(r, "r_add_inplace");
}
else {
- r = ggml_add(lora_ctx, base_t, BA);
+ r = ggml_add(lora_ctx.get(), base_t, BA);
offload_func(r);
ggml_set_name(r, "r_add");
- r = ggml_cpy(lora_ctx, r, dest_t);
+ r = ggml_cpy(lora_ctx.get(), r, dest_t);
offload_func(r);
ggml_set_name(r, "r_cpy");
}
- struct ggml_cgraph * gf = ggml_new_graph(lora_ctx);
+ struct ggml_cgraph * gf = ggml_new_graph(lora_ctx.get());
ggml_build_forward_expand(gf, r);
ggml_graph_compute_helper(work_buffer, gf, n_threads);
+ // the tensors in the adapter must be sorted such that loraA and loraB of the same tensor are next to each other
+ GGML_ASSERT(lora_tensors.size() == 2);
+
// we won't need these tensors again, reset the context to save memory
- ggml_free(lora_ctx);
- lora_ctx = ggml_init(params);
+ lora_ctx.reset(ggml_init(params));
lora_tensors.clear();
n_tensors++;
}
}
- // TODO: this should be in a destructor, it will leak on failure
- ggml_free(lora_ctx);
- if (base_ctx) {
- ggml_free(base_ctx);
- }
-
const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
/*.tensor_split =*/ nullptr,
/*.progress_callback =*/ nullptr,
/*.progress_callback_user_data =*/ nullptr,
+ /*.kv_overrides =*/ nullptr,
/*.vocab_only =*/ false,
/*.use_mmap =*/ true,
/*.use_mlock =*/ false,
/*.yarn_beta_fast =*/ 32.0f,
/*.yarn_beta_slow =*/ 1.0f,
/*.yarn_orig_ctx =*/ 0,
+ /*.type_k =*/ GGML_TYPE_F16,
+ /*.type_v =*/ GGML_TYPE_F16,
/*.mul_mat_q =*/ true,
- /*.f16_kv =*/ true,
/*.logits_all =*/ false,
/*.embedding =*/ false,
+ /*.offload_kqv =*/ true,
};
return result;
LLAMA_LOG_INFO("\n");
}
}
+ return true;
};
}
- if (!llama_model_load(path_model, *model, params)) {
- LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
+ int status = llama_model_load(path_model, *model, params);
+ GGML_ASSERT(status <= 0);
+ if (status < 0) {
+ if (status == -1) {
+ LLAMA_LOG_ERROR("%s: failed to load model\n", __func__);
+ } else if (status == -2) {
+ LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
+ }
delete model;
return nullptr;
}
cparams.yarn_beta_fast = params.yarn_beta_fast;
cparams.yarn_beta_slow = params.yarn_beta_slow;
cparams.mul_mat_q = params.mul_mat_q;
+ cparams.offload_kqv = params.offload_kqv;
cparams.n_ctx = params.n_ctx == 0 ? hparams.n_ctx_train : params.n_ctx;
cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
ctx->rng = std::mt19937(params.seed);
ctx->logits_all = params.logits_all;
- ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
+ const ggml_type type_k = params.type_k;
+ const ggml_type type_v = params.type_v;
+
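+    // the head size must be a multiple of the K/V cache type block size (matters when using quantized KV cache types)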
+ GGML_ASSERT(hparams.n_embd_head() % ggml_blck_size(type_k) == 0);
+ GGML_ASSERT(hparams.n_embd_head() % ggml_blck_size(type_v) == 0);
// reserve memory for context buffers
if (!hparams.vocab_only) {
- if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, cparams.n_ctx, model->n_gpu_layers)) {
+ // initialize backend
+#ifdef GGML_USE_METAL
+ if (model->n_gpu_layers > 0) {
+ ctx->backend = ggml_backend_metal_init();
+ if (ctx->backend == nullptr) {
+ LLAMA_LOG_ERROR("%s: failed to initialize Metal backend\n", __func__);
+ }
+ }
+#elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ // for testing only
+ if (model->n_gpu_layers > 0) {
+ ctx->backend = ggml_backend_cuda_init(0);
+ if (ctx->backend == nullptr) {
+ LLAMA_LOG_ERROR("%s: failed to initialize CUDA backend\n", __func__);
+ }
+ }
+#endif
+
+ if (ctx->backend == nullptr && ggml_backend_buffer_is_host(model->buf)) {
+ ctx->backend = ggml_backend_cpu_init();
+ if (ctx->backend == nullptr) {
+ LLAMA_LOG_ERROR("%s: failed to initialize CPU backend\n", __func__);
+ }
+ }
+
+ if (ctx->backend == nullptr) {
+ LLAMA_LOG_ERROR("%s: failed to initialize a backend\n", __func__);
+ delete ctx;
+ return nullptr;
+ }
+
+ if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, type_k, type_v,
+ cparams.n_ctx, model->n_gpu_layers, cparams.offload_kqv)) {
LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
llama_free(ctx);
return nullptr;
}
{
- const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v);
- LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
+ size_t memory_size_k = 0;
+ size_t memory_size_v = 0;
+
+ for (auto & k : ctx->kv_self.k_l) {
+ memory_size_k += ggml_nbytes(k);
+ }
+
+ for (auto & v : ctx->kv_self.v_l) {
+ memory_size_v += ggml_nbytes(v);
+ }
+
+ LLAMA_LOG_INFO("%s: KV self size = %7.2f MiB, K (%s): %7.2f MiB, V (%s): %7.2f MiB\n", __func__,
+ (float)(memory_size_k + memory_size_v) / (1024.0f * 1024.0f),
+ ggml_type_name(type_k), (float)memory_size_k / (1024.0f * 1024.0f),
+ ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
}
// resized during inference
}
{
- static const size_t tensor_alignment = 32;
// the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data
- ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead());
+ ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead());
// create measure allocator
- ctx->alloc = ggml_allocr_new_measure(tensor_alignment);
+ ctx->alloc = ggml_allocr_new_measure_from_backend(ctx->backend);
// build worst-case graph
int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_batch);
llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0));
-#ifdef GGML_USE_METAL
- if (model->n_gpu_layers > 0) {
- ggml_metal_log_set_callback(llama_log_callback_default, NULL);
-
- ctx->ctx_metal = ggml_metal_init(1);
- if (!ctx->ctx_metal) {
- LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
- llama_free(ctx);
- return NULL;
- }
- //ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false);
- //ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
- }
-#endif
// measure memory requirements for the graph
- size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;
+ size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf);
- LLAMA_LOG_INFO("%s: compute buffer total size = %.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0);
+ LLAMA_LOG_INFO("%s: compute buffer total size = %.2f MiB\n", __func__, (ctx->buf_compute_meta.size() + alloc_size) / 1024.0 / 1024.0);
- // recreate allocator with exact memory requirements
+ // create allocator again with exact memory requirements
ggml_allocr_free(ctx->alloc);
- ctx->buf_alloc.resize(alloc_size);
- ctx->alloc = ggml_allocr_new(ctx->buf_alloc.data, ctx->buf_alloc.size, tensor_alignment);
-#ifdef GGML_USE_METAL
- if (ctx->ctx_metal) {
- //ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
- }
-#endif
-#ifdef GGML_USE_CUBLAS
- ggml_cuda_set_scratch_size(alloc_size);
- LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MB\n", __func__, alloc_size / 1024.0 / 1024.0);
+ ctx->buf_alloc = ggml_backend_alloc_buffer(ctx->backend, alloc_size);
+ ctx->alloc = ggml_allocr_new_from_buffer(ctx->buf_alloc);
+#if defined(GGML_USE_CUBLAS) && !defined(LLAMA_GGML_BACKEND_CUDA_TEST)
+ if (model->n_gpu_layers > 0) {
+ ggml_cuda_set_scratch_size(alloc_size);
+ LLAMA_LOG_INFO("%s: VRAM scratch buffer: %.2f MiB\n", __func__, alloc_size / 1024.0 / 1024.0);
- // calculate total VRAM usage
- auto add_tensor = [](const ggml_tensor * t, size_t & size) {
- if (t->backend == GGML_BACKEND_GPU || t->backend == GGML_BACKEND_GPU_SPLIT) {
- size += ggml_nbytes(t);
+ // calculate total VRAM usage
+ auto add_tensor = [](const ggml_tensor * t, size_t & size) {
+ if (t->backend == GGML_BACKEND_GPU || t->backend == GGML_BACKEND_GPU_SPLIT) {
+ size += ggml_nbytes(t);
+ }
+ };
+ size_t model_vram_size = 0;
+ for (const auto & kv : model->tensors_by_name) {
+ add_tensor(kv.second, model_vram_size);
}
- };
- size_t model_vram_size = 0;
- for (const auto & kv : model->tensors_by_name) {
- add_tensor(kv.second, model_vram_size);
- }
-
- size_t kv_vram_size = 0;
- add_tensor(ctx->kv_self.k, kv_vram_size);
- add_tensor(ctx->kv_self.v, kv_vram_size);
-
- size_t ctx_vram_size = alloc_size + kv_vram_size;
- size_t total_vram_size = model_vram_size + ctx_vram_size;
-
- LLAMA_LOG_INFO("%s: total VRAM used: %.2f MB (model: %.2f MB, context: %.2f MB)\n", __func__,
- total_vram_size / 1024.0 / 1024.0,
- model_vram_size / 1024.0 / 1024.0,
- ctx_vram_size / 1024.0 / 1024.0);
-#endif
- }
-
-#ifdef GGML_USE_METAL
- if (model->n_gpu_layers > 0) {
- // this allocates all Metal resources and memory buffers
-
- void * data_ptr = NULL;
- size_t data_size = 0;
-
- if (ctx->model.mapping) {
- data_ptr = ctx->model.mapping->addr;
- data_size = ctx->model.mapping->size;
- } else {
- data_ptr = ggml_get_mem_buffer(ctx->model.ctx);
- data_size = ggml_get_mem_size (ctx->model.ctx);
- }
- const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx);
+ size_t kv_vram_size = 0;
+ for (auto & k : ctx->kv_self.k_l) {
+ add_tensor(k, kv_vram_size);
+ }
+ for (auto & v : ctx->kv_self.v_l) {
+ add_tensor(v, kv_vram_size);
+ }
- LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0);
+ size_t ctx_vram_size = alloc_size + kv_vram_size;
+ size_t total_vram_size = model_vram_size + ctx_vram_size;
-#define LLAMA_METAL_CHECK_BUF(result) \
- if (!(result)) { \
- LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \
- llama_free(ctx); \
- return NULL; \
+ LLAMA_LOG_INFO("%s: total VRAM used: %.2f MiB (model: %.2f MiB, context: %.2f MiB)\n", __func__,
+ total_vram_size / 1024.0 / 1024.0,
+ model_vram_size / 1024.0 / 1024.0,
+ ctx_vram_size / 1024.0 / 1024.0);
}
-
- LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size));
- LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0));
- LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", ctx->buf_alloc.data, ctx->buf_alloc.size, 0));
-#undef LLAMA_METAL_CHECK_BUF
- }
#endif
+ }
}
#ifdef GGML_USE_MPI
return &ctx->model;
}
-int llama_n_ctx(const struct llama_context * ctx) {
+uint32_t llama_n_ctx(const struct llama_context * ctx) {
return ctx->cparams.n_ctx;
}
+uint32_t llama_n_batch(const struct llama_context * ctx) {
+ return ctx->cparams.n_batch;
+}
+
enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
return model->vocab.type;
}
return model->hparams.rope_freq_scale_train;
}
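+// copy the value of a model metadata key into buf; returns the string length needed (excluding the null terminator), or -1 if the key is not found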
+int llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
+ const auto & it = model->gguf_kv.find(key);
+ if (it == model->gguf_kv.end()) {
+ if (buf_size > 0) {
+ buf[0] = '\0';
+ }
+ return -1;
+ }
+ return snprintf(buf, buf_size, "%s", it->second.c_str());
+}
+
+int llama_model_meta_count(const struct llama_model * model) {
+ return (int)model->gguf_kv.size();
+}
+
+int llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
+ if (i < 0 || i >= (int)model->gguf_kv.size()) {
+ if (buf_size > 0) {
+ buf[0] = '\0';
+ }
+ return -1;
+ }
+ auto it = model->gguf_kv.begin();
+ std::advance(it, i);
+ return snprintf(buf, buf_size, "%s", it->first.c_str());
+}
+
+int llama_model_meta_val_str_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
+ if (i < 0 || i >= (int)model->gguf_kv.size()) {
+ if (buf_size > 0) {
+ buf[0] = '\0';
+ }
+ return -1;
+ }
+ auto it = model->gguf_kv.begin();
+ std::advance(it, i);
+ return snprintf(buf, buf_size, "%s", it->second.c_str());
+}
+
int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
return snprintf(buf, buf_size, "%s %s %s",
llama_model_arch_name(model->arch).c_str(),
}
}
+struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_max_seq) {
+ struct llama_kv_cache_view result = {
+ /*.n_cells = */ 0,
+ /*.n_max_seq = */ n_max_seq,
+ /*.token_count = */ 0,
+ /*.used_cells = */ llama_get_kv_cache_used_cells(ctx),
+ /*.max_contiguous = */ 0,
+ /*.max_contiguous_idx = */ -1,
+ /*.cells = */ nullptr,
+ /*.cells_sequences = */ nullptr,
+ };
+ return result;
+}
+
+void llama_kv_cache_view_free(struct llama_kv_cache_view * view) {
+ if (view->cells != nullptr) {
+ free(view->cells);
+ view->cells = nullptr;
+ }
+ if (view->cells_sequences != nullptr) {
+ free(view->cells_sequences);
+ view->cells_sequences = nullptr;
+ }
+}
+
+void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view) {
+ if (uint32_t(view->n_cells) < ctx->kv_self.size || view->cells == nullptr) {
+ view->n_cells = int32_t(ctx->kv_self.size);
+ void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells);
+ GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells");
+ view->cells = (struct llama_kv_cache_view_cell *)p;
+ p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_max_seq * view->n_cells);
+ GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences");
+ view->cells_sequences = (llama_seq_id *)p;
+ }
+
+ const std::vector<llama_kv_cell> & kv_cells = ctx->kv_self.cells;
+ llama_kv_cache_view_cell * c_curr = view->cells;
+ llama_seq_id * cs_curr = view->cells_sequences;
+ int32_t used_cells = 0;
+ int32_t token_count = 0;
+ int32_t curr_contig_idx = -1;
+ uint32_t max_contig = 0;
+ int32_t max_contig_idx = -1;
+
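+ // walk the cells: record each cell's position and sequence ids, count tokens and used cells,
+ // and track the longest contiguous run of free cells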
+ for (int32_t i = 0; i < int32_t(ctx->kv_self.size); i++, c_curr++, cs_curr += view->n_max_seq) {
+ const size_t curr_size = kv_cells[i].seq_id.size();
+ token_count += curr_size;
+ c_curr->pos = kv_cells[i].pos + kv_cells[i].delta;
+
+ if (curr_size > 0) {
+ if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) {
+ max_contig = i - curr_contig_idx;
+ max_contig_idx = curr_contig_idx;
+ }
+ curr_contig_idx = -1;
+ } else if (curr_contig_idx < 0) {
+ curr_contig_idx = i;
+ }
+
+ int seq_idx = 0;
+ for (const llama_seq_id it : kv_cells[i].seq_id) {
+ if (seq_idx >= view->n_max_seq) {
+ break;
+ }
+ cs_curr[seq_idx] = it;
+ seq_idx++;
+ }
+ if (seq_idx != 0) {
+ used_cells++;
+ }
+ for (; seq_idx < view->n_max_seq; seq_idx++) {
+ cs_curr[seq_idx] = -1;
+ }
+ }
+ if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) {
+ max_contig_idx = curr_contig_idx;
+ max_contig = kv_cells.size() - curr_contig_idx;
+ }
+ view->max_contiguous = max_contig;
+ view->max_contiguous_idx = max_contig_idx;
+ view->token_count = token_count;
+ view->used_cells = used_cells;
+ if (uint32_t(used_cells) != ctx->kv_self.used) {
+ LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n",
+ __func__, ctx->kv_self.used, used_cells);
+ }
+}
+
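+// returns the total number of tokens in the KV cache; a cell shared by multiple
+// sequences is counted once per sequence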
int llama_get_kv_cache_token_count(const struct llama_context * ctx) {
- return ctx->kv_self.head;
+ int result = 0;
+
+ for (uint32_t i = 0; i < ctx->kv_self.size; i++) {
+ result += ctx->kv_self.cells[i].seq_id.size();
+ }
+
+ return result;
+}
+
+int llama_get_kv_cache_used_cells(const struct llama_context * ctx) {
+ return ctx->kv_self.used;
}
void llama_kv_cache_clear(struct llama_context * ctx) {
const size_t s_embedding = ctx->embedding.size() * sizeof(float);
const size_t s_kv_size = sizeof(size_t);
const size_t s_kv_ntok = sizeof(int);
- const size_t s_kv = ctx->kv_self.buf.size;
+ const size_t s_kv = ggml_backend_buffer_get_size(ctx->kv_self.buf);
const size_t s_total = (
+ s_rng_size
const auto n_embd = hparams.n_embd_gqa();
const auto n_ctx = cparams.n_ctx;
- const size_t kv_buf_size = kv_self.buf.size;
+ const size_t kv_buf_size = ggml_backend_buffer_get_size(kv_self.buf);
const uint32_t kv_head = kv_self.head;
const uint32_t kv_size = kv_self.size;
+ const uint32_t kv_used = kv_self.used;
data_ctx->write(&kv_buf_size, sizeof(kv_buf_size));
data_ctx->write(&kv_head, sizeof(kv_head));
data_ctx->write(&kv_size, sizeof(kv_size));
+ data_ctx->write(&kv_used, sizeof(kv_used));
if (kv_buf_size) {
- const size_t elt_size = ggml_element_size(kv_self.k);
+ const size_t elt_size = ggml_element_size(kv_self.k_l[0]);
- ggml_context * cpy_ctx = ggml_init({ 6*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
+ ggml_context * cpy_ctx = ggml_init({ 6*n_layer*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
ggml_cgraph * gf = ggml_new_graph(cpy_ctx);
- ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer);
- std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0);
- kout3d->data = kout3d_data.data();
+ std::vector<struct ggml_tensor *> kout2d(n_layer);
+ std::vector<struct ggml_tensor *> vout2d(n_layer);
+
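+ // stage the first kv_head entries of each layer's K and V cache into temporary 2D tensors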
+ for (int il = 0; il < (int) n_layer; ++il) {
+ kout2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd, kv_head);
+ vout2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd);
+
+ ggml_tensor * k2d = ggml_view_2d(cpy_ctx, kv_self.k_l[il],
+ n_embd, kv_head,
+ elt_size*n_embd, 0);
+
+ ggml_tensor * v2d = ggml_view_2d(cpy_ctx, kv_self.v_l[il],
+ kv_head, n_embd,
+ elt_size*n_ctx, 0);
- ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_head, n_embd, n_layer);
- std::vector<uint8_t> vout3d_data(ggml_nbytes(vout3d), 0);
- vout3d->data = vout3d_data.data();
+ ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, k2d, kout2d[il]));
+ ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, v2d, vout2d[il]));
+ }
+
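+ // allocate a temporary backend buffer for the staging tensors and run the copy graph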
+ ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(cpy_ctx, ctx->backend);
- ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
- n_embd, kv_head, n_layer,
- elt_size*n_embd, elt_size*n_embd*n_ctx, 0);
+ ggml_backend_graph_compute(ctx->backend, gf);
- ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
- kv_head, n_embd, n_layer,
- elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);
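+ // read each staged tensor back from the backend and append it to the serialized state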
+ std::vector<uint8_t> tmp_buf;
+ for (int il = 0; il < (int) n_layer; ++il) {
+ tmp_buf.resize(ggml_nbytes(kout2d[il]));
+ ggml_backend_tensor_get(kout2d[il], tmp_buf.data(), 0, tmp_buf.size());
+ data_ctx->write(tmp_buf.data(), tmp_buf.size());
- ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, k3d, kout3d));
- ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, v3d, vout3d));
- ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1);
+ tmp_buf.resize(ggml_nbytes(vout2d[il]));
+ ggml_backend_tensor_get(vout2d[il], tmp_buf.data(), 0, tmp_buf.size());
+ data_ctx->write(tmp_buf.data(), tmp_buf.size());
+ }
ggml_free(cpy_ctx);
- // our data is now in the kout3d_data and vout3d_data buffers
- // write them to file
- data_ctx->write(kout3d_data.data(), kout3d_data.size());
- data_ctx->write(vout3d_data.data(), vout3d_data.size());
+ ggml_backend_buffer_free(buf);
}
for (uint32_t i = 0; i < kv_size; ++i) {
size_t kv_buf_size;
uint32_t kv_head;
uint32_t kv_size;
+ uint32_t kv_used;
memcpy(&kv_buf_size, inp, sizeof(kv_buf_size)); inp += sizeof(kv_buf_size);
memcpy(&kv_head, inp, sizeof(kv_head)); inp += sizeof(kv_head);
memcpy(&kv_size, inp, sizeof(kv_size)); inp += sizeof(kv_size);
+ memcpy(&kv_used, inp, sizeof(kv_used)); inp += sizeof(kv_used);
if (kv_buf_size) {
- GGML_ASSERT(kv_self.buf.size == kv_buf_size);
+ GGML_ASSERT(ggml_backend_buffer_get_size(kv_self.buf) == kv_buf_size);
- const size_t elt_size = ggml_element_size(kv_self.k);
+ const size_t elt_size = ggml_element_size(kv_self.k_l[0]);
- ggml_context * cpy_ctx = ggml_init({ 6*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
+ ggml_context * cpy_ctx = ggml_init({ 6*n_layer*ggml_tensor_overhead() + ggml_graph_overhead(), NULL, /* no_alloc */ true });
ggml_cgraph * gf = ggml_new_graph(cpy_ctx);
- ggml_tensor * kin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_head, n_layer);
- kin3d->data = (void *) inp;
- inp += ggml_nbytes(kin3d);
+ std::vector<struct ggml_tensor *> kin2d(n_layer);
+ std::vector<struct ggml_tensor *> vin2d(n_layer);
+
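+ // stage the incoming per-layer K/V data in temporary 2D tensors and build a graph
+ // that copies them into views of the KV cache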
+ for (int il = 0; il < (int) n_layer; ++il) {
+ kin2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.k_l[il]->type, n_embd, kv_head);
+ vin2d[il] = ggml_new_tensor_2d(cpy_ctx, kv_self.v_l[il]->type, kv_head, n_embd);
+
+ ggml_tensor * k2d = ggml_view_2d(cpy_ctx, kv_self.k_l[il],
+ n_embd, kv_head,
+ elt_size*n_embd, 0);
+
+ ggml_tensor * v2d = ggml_view_2d(cpy_ctx, kv_self.v_l[il],
+ kv_head, n_embd,
+ elt_size*n_ctx, 0);
+
+ ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, kin2d[il], k2d));
+ ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, vin2d[il], v2d));
+ }
- ggml_tensor * vin3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_head, n_embd, n_layer);
- vin3d->data = (void *) inp;
- inp += ggml_nbytes(vin3d);
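+ // allocate a temporary backend buffer for the staging tensors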
+ ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(cpy_ctx, ctx->backend);
- ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
- n_embd, kv_head, n_layer,
- elt_size*n_embd, elt_size*n_embd*n_ctx, 0);
+ // load data into the tensors
+ for (int il = 0; il < (int) n_layer; ++il) {
+ ggml_backend_tensor_set(kin2d[il], inp, 0, ggml_nbytes(kin2d[il]));
+ inp += ggml_nbytes(kin2d[il]);
- ggml_tensor * v3d = ggml_view_3d(cpy_ctx, kv_self.v,
- kv_head, n_embd, n_layer,
- elt_size*n_ctx, elt_size*n_ctx*n_embd, 0);
+ ggml_backend_tensor_set(vin2d[il], inp, 0, ggml_nbytes(vin2d[il]));
+ inp += ggml_nbytes(vin2d[il]);
+ }
- ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, kin3d, k3d));
- ggml_build_forward_expand(gf, ggml_cpy(cpy_ctx, vin3d, v3d));
- ggml_graph_compute_helper(ctx->work_buffer, gf, /*n_threads*/ 1);
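+ // run the copy graph to write the restored data into the KV cache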
+ ggml_backend_graph_compute(ctx->backend, gf);
ggml_free(cpy_ctx);
+
+ ggml_backend_buffer_free(buf);
}
ctx->kv_self.head = kv_head;
ctx->kv_self.size = kv_size;
+ ctx->kv_self.used = kv_used;
ctx->kv_self.cells.resize(kv_size);
}
float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
+ assert(ctx->logits_valid.at(i));
return ctx->logits.data() + i*ctx->model.hparams.n_vocab;
}
return model->vocab.linefeed_id;
}
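+// whether the tokenizer should add BOS/EOS tokens, as read from the model metadata
+// (tokenizer.ggml.add_bos_token / add_eos_token); -1 means unknown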
+int llama_add_bos_token(const struct llama_model * model) {
+ return model->vocab.special_add_bos;
+}
+
+int llama_add_eos_token(const struct llama_model * model) {
+ return model->vocab.special_add_eos;
+}
+
llama_token llama_token_prefix(const struct llama_model * model) {
return model->vocab.special_prefix_id;
}
void llama_log_set(ggml_log_callback log_callback, void * user_data) {
g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
g_state.log_callback_user_data = user_data;
+#ifdef GGML_USE_METAL
+ ggml_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
+#endif
}
static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {