server : recognize cache_prompt parameter in OAI API (#4347)
author    Georgi Gerganov <redacted>
          Wed, 6 Dec 2023 18:21:59 +0000 (20:21 +0200)
committer GitHub <redacted>
          Wed, 6 Dec 2023 18:21:59 +0000 (20:21 +0200)
examples/server/server.cpp

index 911f7bbe1f85a2ca56c8cb8c3429894a2a7a64de..369f81a8428b2c44ed00a552fbcaabd9bfdf5db8 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -2387,6 +2387,7 @@ json oaicompat_completion_params_parse(
 
     // Map OpenAI parameters to llama.cpp parameters
     llama_params["prompt"]            = format_chatml(body["messages"]); // OpenAI 'messages' to llama.cpp 'prompt'
+    llama_params["cache_prompt"]      = json_value(body, "cache_prompt", false);
     llama_params["temperature"]       = json_value(body, "temperature", 0.8);
     llama_params["top_k"]             = json_value(body, "top_k", 40);
     llama_params["top_p"]             = json_value(body, "top_p", 0.95);