From: Georgi Gerganov Date: Fri, 12 May 2023 18:44:20 +0000 (+0300) Subject: llama : fix --mtest option (close #1414) X-Git-Tag: gguf-v0.4.0~773 X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=fb62f924336c9746da9976c6ab3c2e6460258d54;p=pkg%2Fggml%2Fsources%2Fllama.cpp llama : fix --mtest option (close #1414) --- diff --git a/examples/main/main.cpp b/examples/main/main.cpp index bd1c4ab5..8543414d 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -121,7 +121,7 @@ int main(int argc, char ** argv) { // uncomment the "used_mem" line in llama.cpp to see the results if (params.mem_test) { { - const std::vector<llama_token> tmp(params.n_batch, 0); + const std::vector<llama_token> tmp(params.n_batch, llama_token_bos()); llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads); }