From: Ravindra Marella
Date: Sat, 13 May 2023 12:24:47 +0000 (+0530)
Subject: examples : fix warnings (#152)
X-Git-Tag: upstream/0.0.1642~1482
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=935eca6cab7818c786368176f06523cbdaeef71c;p=pkg%2Fggml%2Fsources%2Fggml

examples : fix warnings (#152)
---

diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 8a8b9f71..ceca69e9 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -1,3 +1,15 @@
+if (GGML_ALL_WARNINGS)
+    if (NOT MSVC)
+        set(cxx_flags
+            # TODO(marella): Add other warnings.
+            -Wunused-variable
+            -Wno-unused-function
+            -Wno-multichar
+        )
+        add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>")
+    endif()
+endif()
+
 add_library(common STATIC common.cpp)
 
 target_include_directories(common PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
diff --git a/examples/dolly-v2/main.cpp b/examples/dolly-v2/main.cpp
index f6279511..76a29be7 100644
--- a/examples/dolly-v2/main.cpp
+++ b/examples/dolly-v2/main.cpp
@@ -220,7 +220,6 @@ bool dollyv2_model_load(const std::string & fname, dollyv2_model & model, gpt_vo
 
         const int n_embd  = hparams.n_embd;
         const int n_layer = hparams.n_layer;
-        const int n_ctx   = hparams.n_ctx;
         const int n_vocab = hparams.n_vocab;
 
         model.layers.resize(n_layer);
@@ -303,7 +302,7 @@ bool dollyv2_model_load(const std::string & fname, dollyv2_model & model, gpt_vo
 
         const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
 
-        printf("%s: memory_size = %8.2f MB, n_mem = %lld\n", __func__, memory_size/1024.0/1024.0, n_mem);
+        printf("%s: memory_size = %8.2f MB, n_mem = %ld\n", __func__, memory_size/1024.0/1024.0, n_mem);
     }
 
     // load weights
diff --git a/examples/gpt-j/main.cpp b/examples/gpt-j/main.cpp
index 1f658200..c3d2228b 100644
--- a/examples/gpt-j/main.cpp
+++ b/examples/gpt-j/main.cpp
@@ -206,7 +206,6 @@ bool gptj_model_load(const std::string & fname, gptj_model & model, gpt_vocab &
 
         const int n_embd  = hparams.n_embd;
         const int n_layer = hparams.n_layer;
-        const int n_ctx   = hparams.n_ctx;
         const int n_vocab = hparams.n_vocab;
 
         model.layers.resize(n_layer);
@@ -391,8 +390,6 @@ bool gptj_eval(
     const int n_vocab = hparams.n_vocab;
     const int n_rot   = hparams.n_rot;
 
-    const int d_key = n_embd/n_head;
-
     static size_t buf_size = 256u*1024*1024;
     static void * buf = malloc(buf_size);
 
diff --git a/examples/gpt-neox/main.cpp b/examples/gpt-neox/main.cpp
index 1f793039..4b18ab2d 100644
--- a/examples/gpt-neox/main.cpp
+++ b/examples/gpt-neox/main.cpp
@@ -209,7 +209,6 @@ bool gpt_neox_model_load(const std::string & fname, gpt_neox_model & model, gpt_
 
         const int n_embd  = hparams.n_embd;
         const int n_layer = hparams.n_layer;
-        const int n_ctx   = hparams.n_ctx;
         const int n_vocab = hparams.n_vocab;
 
         model.layers.resize(n_layer);
@@ -289,7 +288,7 @@ bool gpt_neox_model_load(const std::string & fname, gpt_neox_model & model, gpt_
 
         const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
 
-        printf("%s: memory_size = %8.2f MB, n_mem = %lld\n", __func__, memory_size/1024.0/1024.0, n_mem);
+        printf("%s: memory_size = %8.2f MB, n_mem = %ld\n", __func__, memory_size/1024.0/1024.0, n_mem);
     }
 
     // load weights
diff --git a/examples/mnist/main.cpp b/examples/mnist/main.cpp
index 8036aaf0..70beb36e 100644
--- a/examples/mnist/main.cpp
+++ b/examples/mnist/main.cpp
@@ -55,8 +55,6 @@ bool mnist_model_load(const std::string & fname, mnist_model & model) {
         }
     }
 
-    const ggml_type wtype2 = GGML_TYPE_F32;
-
     auto & ctx = model.ctx;
 
     size_t ctx_size = 0;
@@ -206,7 +204,6 @@ int main(int argc, char ** argv) {
         fprintf(stderr, "Usage: %s models/mnist/ggml-model-f32.bin models/mnist/t10k-images.idx3-ubyte\n", argv[0]);
         exit(0);
     }
 
-    const int64_t t_main_start_us = ggml_time_us();
     mnist_hparams params;
     int64_t t_load_us = 0;
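
Note on the printf changes above: n_mem in these examples appears to be an int64_t (computed from n_layer and n_ctx), so the old %lld specifier, which expects long long, triggers -Wformat warnings on LP64 targets (64-bit Linux/macOS) where int64_t is a typedef for long. %ld matches there; the fully portable spelling would be PRId64 from <cinttypes>. A minimal sketch, assuming an int64_t n_mem as in the patch context (the value is a stand-in):

    #include <cinttypes> // PRId64 (also pulls in <cstdint> for int64_t)
    #include <cstdio>

    int main() {
        const int64_t n_mem = 16*1024; // stand-in; the examples compute n_layer*n_ctx

        // As in the patch: %ld matches int64_t on LP64 platforms,
        // where int64_t is a typedef for long.
        printf("n_mem = %ld\n", n_mem);

        // Portable alternative that also works on LLP64 (64-bit Windows),
        // where int64_t is long long and %ld would again warn:
        printf("n_mem = %" PRId64 "\n", n_mem);
        return 0;
    }

Similarly, the $<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}> generator expression in examples/CMakeLists.txt scopes the warning flags to C++ translation units only, so they are not passed when compiling C sources, and the surrounding if (NOT MSVC) keeps these GCC/Clang-style flags away from MSVC entirely.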