]> git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Tell users attempting to run perplexity with too few tokens to use more (#2882)
authorKawrakow <redacted>
Tue, 29 Aug 2023 20:55:45 +0000 (23:55 +0300)
committerGitHub <redacted>
Tue, 29 Aug 2023 20:55:45 +0000 (23:55 +0300)
Closes #2858

Co-authored-by: Iwan Kawrakow <redacted>
examples/perplexity/perplexity.cpp

index aeb774c5fa496037ea14a394b6587781e1b53864..7c02b6d4058f1777120e93bb4e9ff23afe069910 100644 (file)
@@ -142,6 +142,14 @@ results_perplexity perplexity_v2(llama_context * ctx, const gpt_params & params)
     fprintf(stderr, "%s: tokenizing the input ..\n", __func__);
 
     std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
+
+    if (int(tokens.size()) < 2*params.n_ctx) {
+        fprintf(stderr, "%s: you need at least %d tokens to evaluate perplexity with a context of %d\n",__func__,2*params.n_ctx,
+                params.n_ctx);
+        fprintf(stderr, "%s: the data file you provided tokenizes to only %zu tokens\n",__func__,tokens.size());
+        return {std::move(tokens), 0., {}, {}};
+    }
+
     std::vector<float>       logit_history;
     std::vector<float>       prob_history;
 
@@ -274,6 +282,13 @@ results_perplexity perplexity(llama_context * ctx, const gpt_params & params) {
     auto tim2 = std::chrono::high_resolution_clock::now();
     fprintf(stderr, "%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
 
+    if (int(tokens.size()) < 2*params.n_ctx) {
+        fprintf(stderr, "%s: you need at least %d tokens to evaluate perplexity with a context of %d\n",__func__,2*params.n_ctx,
+                params.n_ctx);
+        fprintf(stderr, "%s: the data file you provided tokenizes to only %zu tokens\n",__func__,tokens.size());
+        return {std::move(tokens), 0., {}, {}};
+    }
+
     std::vector<float> logit_history;
     logit_history.resize(tokens.size());