fout.write(struct.pack("i", hparams["multiple_of"]))
fout.write(struct.pack("i", hparams["n_heads"]))
fout.write(struct.pack("i", hparams["n_layers"]))
-fout.write(struct.pack("i", 64)) # rot
+fout.write(struct.pack("i", hparams["dim"] // hparams["n_heads"])) # rot (obsolete)
fout.write(struct.pack("i", ftype))
# NOTE: the C++ loader below derives n_rot the same way (n_embd/n_head), so the writer and reader agree.
const int n_ctx = hparams.n_ctx;
const int n_head = hparams.n_head;
const int n_vocab = hparams.n_vocab;
- const int n_rot = hparams.n_rot;
+ const int n_rot = hparams.n_embd/hparams.n_head;
const int d_key = n_embd/n_head;
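// e.g. for the 7B model: n_embd = 4096, n_head = 32, so n_rot = d_key = 4096/32 = 128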
params.prompt = gpt_random_prompt(rng);
}
+// params.prompt = R"(// this function checks if the number n is prime
+//bool is_prime(int n) {)";
+
int64_t t_load_us = 0;
gpt_vocab vocab;
if (i >= embd_inp.size()) {
// sample next token
- const int top_k = params.top_k;
const float top_p = params.top_p;
const float temp = params.temp;
{
const int64_t t_start_sample_us = ggml_time_us();
- id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng);
+ id = llama_sample_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_p, temp, rng);
t_sample_us += ggml_time_us() - t_start_sample_us;
}
}
}
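// note: the former top-k filtering step is gone entirely; llama_sample_top_p()
// keeps only the smallest set of tokens whose cumulative probability reaches
// top_p (nucleus sampling) and draws the next token from that set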
- if (l == 0 && t != 13) {
+ if (l == 0) {
break;
}
return logits_id[idx].second;
}
+gpt_vocab::id llama_sample_top_p(
+ const gpt_vocab & vocab,
+ const float * logits,
+ double top_p,
+ double temp,
+ std::mt19937 & rng) {
+ int n_logits = vocab.id_to_token.size();
+
+ std::vector<std::pair<double, gpt_vocab::id>> logits_id;
+ logits_id.reserve(n_logits);
+
+ {
+ const double scale = 1.0/temp;
+ for (int i = 0; i < n_logits; ++i) {
+ logits_id.push_back(std::make_pair(logits[i]*scale, i));
+ }
+ }
+
+ std::sort(
+ logits_id.begin(),
+ logits_id.end(),
+ [](const std::pair<double, gpt_vocab::id> & a, const std::pair<double, gpt_vocab::id> & b) {
+ return a.first > b.first;
+ });
+
+ double maxl = -INFINITY;
+ for (const auto & kv : logits_id) {
+ maxl = std::max(maxl, kv.first);
+ }
+
+ // compute softmax probabilities for all tokens
+ std::vector<double> probs;
+ probs.reserve(logits_id.size());
+
+ double sum = 0.0;
+ for (const auto & kv : logits_id) {
+ double p = exp(kv.first - maxl);
+ probs.push_back(p);
+ sum += p;
+ }
+
+ // normalize the probs
+ for (auto & p : probs) {
+ p /= sum;
+ }
+
+ if (top_p < 1.0) {
+ double cumsum = 0.0;
+ for (int i = 0; i < (int) probs.size(); i++) {
+ cumsum += probs[i];
+ if (cumsum >= top_p) {
+ probs.resize(i + 1);
+ logits_id.resize(i + 1);
+ break;
+ }
+ }
+
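+ // renormalize the surviving probabilities so they sum to 1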
+ cumsum = 1.0/cumsum;
+ for (int i = 0; i < (int) probs.size(); i++) {
+ probs[i] *= cumsum;
+ }
+ }
+
+ // (debug) optionally print the surviving candidates and their probabilities here
+
+ std::discrete_distribution<> dist(probs.begin(), probs.end());
+ int idx = dist(rng);
+
+ return logits_id[idx].second;
+}
+
+
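// A minimal self-contained sketch of the same nucleus (top-p) cutoff, for
// reference. It drops the gpt_vocab dependency and samples straight from a
// raw logit array; the name sample_top_p_demo and the example logits are
// illustrative only, not part of the codebase.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <random>
#include <utility>
#include <vector>

static int sample_top_p_demo(const std::vector<float> & logits, double top_p, double temp, std::mt19937 & rng) {
    // scale logits by the temperature and remember the original indices
    std::vector<std::pair<double, int>> scaled;
    scaled.reserve(logits.size());
    for (int i = 0; i < (int) logits.size(); ++i) {
        scaled.push_back(std::make_pair(logits[i]/temp, i));
    }
    std::sort(scaled.begin(), scaled.end(),
        [](const std::pair<double, int> & a, const std::pair<double, int> & b) {
            return a.first > b.first;
        });

    // softmax, subtracting the max logit for numerical stability
    const double maxl = scaled.front().first;
    std::vector<double> probs;
    probs.reserve(scaled.size());
    double sum = 0.0;
    for (const auto & kv : scaled) {
        probs.push_back(std::exp(kv.first - maxl));
        sum += probs.back();
    }
    for (auto & p : probs) {
        p /= sum;
    }

    // keep the smallest prefix whose cumulative probability reaches top_p
    double cumsum = 0.0;
    for (size_t i = 0; i < probs.size(); ++i) {
        cumsum += probs[i];
        if (cumsum >= top_p) {
            probs.resize(i + 1);
            scaled.resize(i + 1);
            break;
        }
    }

    // discrete_distribution normalizes the (truncated) weights internally
    std::discrete_distribution<> dist(probs.begin(), probs.end());
    return scaled[dist(rng)].second; // index into the original logits
}

int main() {
    std::mt19937 rng(1234);
    const std::vector<float> logits = {2.0f, 1.0f, 0.5f, -1.0f};
    printf("sampled token index: %d\n", sample_top_p_demo(logits, 0.9, 0.8, rng));
    return 0;
}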
size_t ggml_quantize_q4_0(float * src, void * dst, int n, int k, int qk, int64_t * hist) {
const int nb = k / qk;
const size_t row_size = nb*(sizeof(float) + sizeof(uint8_t)*qk/2);
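// e.g. with qk = 32 (the Q4_0 block size), each block stores one float scale
// (4 bytes) plus qk 4-bit quants packed two per byte (16 bytes), so
// row_size = (k/32)*20 bytes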