git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Fix incorrect format strings and uninitialized variables. (#4133)
author Haohui Mai <redacted>
Thu, 23 Nov 2023 21:56:53 +0000 (13:56 -0800)
committer GitHub <redacted>
Thu, 23 Nov 2023 21:56:53 +0000 (22:56 +0100)
* Fix incorrect format strings and uninitialized variables.

* Address comments

* Add the missing include statement

examples/server/server.cpp
ggml-cuda.cu

index 1f2c55f2dccdf96d2c0c04ea0921235a4422b623..be23ad1699391e09d2a8bf0c7b2602a7afe76837 100644 (file)
@@ -1095,6 +1095,7 @@ struct llama_server_context
         std::lock_guard<std::mutex> lock(mutex_results);
         task_result res;
         res.id = id;
+        res.stop = false;
         res.error = true;
         res.result_json = { { "content", error } };
         queue_results.push_back(res);
@@ -1255,6 +1256,7 @@ struct llama_server_context
         std::lock_guard<std::mutex> lock(mutex_tasks);
         task_server task;
         task.id = id_gen++;
+        task.target_id = 0;
         task.data = data;
         task.infill_mode = infill;
         task.embedding_mode = embedding;
index 50e03de5007472b82f9e0bb667f098b1213bad83..f0db7ae357a2fefb22c73f3c893eb4f94085b847 100644 (file)
@@ -1,4 +1,5 @@
 #include <algorithm>
+#include <cinttypes>
 #include <cstddef>
 #include <cstdint>
 #include <limits>
@@ -8057,7 +8058,7 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_
     if (tensor->op == GGML_OP_MUL_MAT) {
         if (tensor->src[0]->ne[3] != tensor->src[1]->ne[3]) {
 #ifndef NDEBUG
-            fprintf(stderr, "%s: cannot compute %s: src0->ne[3] = %d, src1->ne[3] = %d - fallback to CPU\n", __func__, tensor->name, tensor->src[0]->ne[3], tensor->src[1]->ne[3]);
+            fprintf(stderr, "%s: cannot compute %s: src0->ne[3] = %" PRId64 ", src1->ne[3] = %" PRId64 " - fallback to CPU\n", __func__, tensor->name, tensor->src[0]->ne[3], tensor->src[1]->ne[3]);
 #endif
             return false;
         }