const int d_key = n_embd/n_head;
+ // TODO: check whether this buffer size scales linearly with n_ctx and remove the constant; it does not seem to.
+ // static size_t buf_size = hparams.n_ctx*1024*1024;
static size_t buf_size = 512u*1024*1024;
static void * buf = malloc(buf_size);
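+ // A possible sizing sketch (assumption, untested): if profiling shows the buffer does grow
+ // with the context length, it could combine a fixed floor with a per-context term instead
+ // of the flat 512 MB constant, e.g.:
+ //   static size_t buf_size = 256u*1024*1024 + (size_t) hparams.n_ctx*1024*1024;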
// load the model
{
const int64_t t_start_us = ggml_time_us();
-
- if (!llama_model_load(params.model, model, vocab, 512)) { // TODO: set context from user input ??
+ if (!llama_model_load(params.model, model, vocab, params.n_ctx)) {
fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
return 1;
}
params.n_predict = std::stoi(argv[++i]);
} else if (arg == "--top_k") {
params.top_k = std::stoi(argv[++i]);
+ } else if (arg == "-c" || arg == "--ctx_size") {
+ params.n_ctx = std::stoi(argv[++i]);
} else if (arg == "--top_p") {
params.top_p = std::stof(argv[++i]);
} else if (arg == "--temp") {
fprintf(stderr, " --top_p N top-p sampling (default: %.1f)\n", params.top_p);
fprintf(stderr, " --repeat_last_n N last n tokens to consider for penalize (default: %d)\n", params.repeat_last_n);
fprintf(stderr, " --repeat_penalty N penalize repeat sequence of tokens (default: %.1f)\n", params.repeat_penalty);
+ fprintf(stderr, " -c N, --ctx_size N size of the prompt context (default: %d)\n", params.n_ctx);
fprintf(stderr, " --temp N temperature (default: %.1f)\n", params.temp);
fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch);
fprintf(stderr, " -m FNAME, --model FNAME\n");
int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
int32_t n_predict = 128; // new tokens to predict
int32_t repeat_last_n = 64; // last n tokens to penalize
-
+ int32_t n_ctx = 512; // context size
+
// sampling parameters
int32_t top_k = 40;
float top_p = 0.95f;