git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
If the first token generated by the server is the stop word, the server will crash ...
author    maor-ps <redacted>
          Sat, 4 May 2024 09:06:40 +0000 (12:06 +0300)
committer GitHub <redacted>
          Sat, 4 May 2024 09:06:40 +0000 (11:06 +0200)
This request reproduces the issue with llama 13b:
{
    "prompt": "Q: hello world \nA: ",
    "stop": ["\n"],
    "temperature": 0.0,
    "n_predict": 10,
    "cache_prompt": true,
    "n_probs": 10
}
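
For context, a minimal standalone sketch of the failure mode the fix below guards against. The vectors, token values, and the names generated and stop_toks are illustrative stand-ins, not the server's actual types: the point is only that subtracting a count larger than the vector's size from end() is undefined behavior, while clamping with std::min keeps the range valid.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    // "First generated token is the stop word": no token probabilities have
    // been recorded yet, but the stop word tokenizes to one token (illustrative).
    std::vector<int> generated;            // stand-in for slot.generated_token_probs
    std::vector<int> stop_toks = {13};     // stand-in for stop_word_toks

    // Before the fix: end() - stop_toks.size() moves the iterator before
    // begin() when stop_toks is longer than generated -> undefined behavior.
    // std::vector<int> probs(generated.begin(), generated.end() - stop_toks.size());

    // After the fix: clamp the offset so it can never exceed the vector size.
    size_t safe_offset = std::min(generated.size(), stop_toks.size());
    std::vector<int> probs(generated.begin(), generated.end() - safe_offset);

    std::printf("kept %zu token prob entries\n", probs.size());
    return 0;
}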

examples/server/server.cpp

index f60530cf3db5617692c4280f0a9c082d8cb69258..ff0814b2f28bfb2d58eab6a8644d4e41898f3cd1 100644 (file)
@@ -1383,9 +1383,10 @@ struct server_context {
             if (!slot.params.stream && slot.stopped_word) {
                 const std::vector<llama_token> stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false);
 
+                size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size());
                 probs = std::vector<completion_token_output>(
                         slot.generated_token_probs.begin(),
-                        slot.generated_token_probs.end() - stop_word_toks.size());
+                        slot.generated_token_probs.end() - safe_offset);
             } else {
                 probs = std::vector<completion_token_output>(
                         slot.generated_token_probs.begin(),
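
With the clamp in place, slot.generated_token_probs.end() - safe_offset can never point before begin(), so the vector constructor receives a valid (possibly empty) range even when the stop word is matched before any token probabilities have been recorded.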