git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Remove unused data and add fixes (#5154)
author: Michael Klimenko <redacted>
Sat, 27 Jan 2024 14:25:55 +0000 (15:25 +0100)
committer: GitHub <redacted>
Sat, 27 Jan 2024 14:25:55 +0000 (15:25 +0100)
* Remove unused data and add fixes

* Add missing file

* Address review comments

* Replace the scope of vq allocation

common/sampling.cpp
examples/infill/infill.cpp
examples/llava/clip.cpp
examples/server/server.cpp
pocs/vdot/vdot.cpp
tests/test-backend-ops.cpp
tests/test-llama-grammar.cpp

index efd7eab6e50b829bae1a34cfe1293c7e6574fe0b..e8675a8c0c18902da77e8966b4ca81d6cea276b8 100644 (file)
@@ -13,6 +13,7 @@ struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_
         // will be empty (default) if there are parse errors
         if (result->parsed_grammar.rules.empty()) {
             fprintf(stderr, "%s: failed to parse grammar\n", __func__);
+            delete result;
             return nullptr;
         }
 
index 4a7827876e2151a0ceba42f95e7319aa2668c746..72fb133b4fa066f3941f6a6abde8e2b3ded27ea4 100644 (file)
@@ -241,7 +241,7 @@ int main(int argc, char ** argv) {
     LOG("add_bos: %d\n", add_bos);
 
     bool suff_rm_leading_spc = params.escape;
-    if (suff_rm_leading_spc && params.input_suffix.find_first_of(" ") == 0 && params.input_suffix.size() > 1) {
+    if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
         params.input_suffix.erase(0, 1);
         suff_rm_leading_spc = false;
     }
index 4a0338a37677508068a992966ae7b89dac45023a..f2cd86afec457f1959be175ee8f6b36ccc8a48cc 100644 (file)
@@ -1277,7 +1277,6 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
         ".*weight",
     };
 
-    std::vector<uint8_t> read_data(512);
     std::vector<uint8_t> work(512);
     std::vector<float> conv_buf(512);
     std::vector<int64_t> hist_all(1 << 4, 0);
index af63f2f6f7d125105b1fbbe6ceca23932eb58f4b..f58a2acaa8011fcc4f0cb22c0acbba9c9ff95f41 100644 (file)
@@ -681,7 +681,7 @@ struct llama_server_context
                     while ((pos = prompt.find(pattern, pos)) != std::string::npos) {
                         size_t end_prefix = pos;
                         pos += pattern.length();
-                        size_t end_pos = prompt.find("]", pos);
+                        size_t end_pos = prompt.find(']', pos);
                         if (end_pos != std::string::npos)
                         {
                             std::string image_id = prompt.substr(pos, end_pos - pos);
index e96372c4b7107dc89198e7066de1c97b1a8060be..73ffcd1cac5a25398d35f7b2f1582140933e2a64 100644 (file)
@@ -243,7 +243,6 @@ int main(int argc, char** argv) {
     if (useQ4_1) q41.resize(n4);
     else q40.resize(n4);
     std::vector<block_q8_0> q8(n8);
-    std::vector<int64_t> H(16, 0);
     double sumt = 0, sumt2 = 0, maxt = 0;
     double sumqt = 0, sumqt2 = 0, maxqt = 0;
     double sum = 0, sumq = 0, exactSum = 0;
index 55ce14e0d902c50c613d98d6b6cc0ce211119019..e3c656f568dca7ae8991d18a246fa1a7b6dec938 100644 (file)
@@ -102,7 +102,6 @@ static std::vector<float> tensor_to_float(const ggml_tensor * t) {
                     } else if (t->type == GGML_TYPE_I8) {
                         tv.push_back((float)*(int8_t *) &buf[i]);
                     } else if (quantized) {
-                        std::vector<float> vq(ggml_blck_size(t->type));
                         tt.to_float(&buf[i], vq.data(), ggml_blck_size(t->type));
                         tv.insert(tv.end(), vq.begin(), vq.end());
                     } else {
index 73dd33dd286a5c0d3123ed88badd70d1b1f6b975..78fc4111776b74ed07480946f9049560a0aaf979 100644 (file)
@@ -190,7 +190,6 @@ int main()
         index++;
     }
 
-    std::vector<std::vector<const llama_grammar_element *>> next_stacks;
     std::vector<llama_grammar_candidate> next_candidates;
     next_candidates.resize(24);