tests : add a C compliance test (#2848)
author    Cebtenzzre <redacted>
Wed, 30 Aug 2023 06:20:26 +0000 (02:20 -0400)
committer GitHub <redacted>
Wed, 30 Aug 2023 06:20:26 +0000 (09:20 +0300)
* tests : add a C compliance test

* make : build C compliance test by default

* make : fix clean and make sure C test fails on clang

* make : move -Werror=implicit-int to CFLAGS
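
For context (not part of the commit): "implicit int" is the old C89 rule that a declaration with no type specifier defaults to int. C99 removed the rule, but gcc and clang have traditionally accepted it with only a warning, so -Werror=implicit-int promotes that warning to a hard error. A minimal sketch of code the new flag rejects:

    /* illustrative only -- valid C89, invalid C99/C11; builds with a warning
       by default and fails once -Werror=implicit-int is in CFLAGS */
    static foo(void) { return 42; }   /* return type omitted, implicitly int */

    int main(void) {
        return foo() == 42 ? 0 : 1;
    }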

CMakeLists.txt
Makefile
tests/CMakeLists.txt
tests/test-c.c [new file with mode: 0644]

index 1eae2d670c0be101547cdbb953e73f04391ab885..d6c1b3b33d6a2070966452d7d59028b060cd7ee8 100644 (file)
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -402,6 +402,7 @@ if (LLAMA_ALL_WARNINGS)
             -Wstrict-prototypes
             -Wpointer-arith
             -Wmissing-prototypes
+            -Werror=implicit-int
         )
         set(cxx_flags
             -Wall
index 02ba3e36d84662919dab9cd21b649558a3a18893..44e68b7fc034f2d4528ae93a1549674f75350833 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 # Define the default target now so that it is always the first target
-BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam_search
+BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam_search tests/test-c.o
 
 # Binaries only useful for tests
 TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama tests/test-tokenizer-0-falcon tests/test-tokenizer-1
@@ -64,7 +64,7 @@ endif
 
 # warnings
 CFLAGS   += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith \
-                       -Wmissing-prototypes
+                       -Wmissing-prototypes -Werror=implicit-int
 CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar
 
 # OS specific
@@ -369,7 +369,7 @@ libllama.so: llama.o ggml.o $(OBJS)
        $(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)
 
 clean:
-       rm -vf *.o *.so *.dll benchmark-matmult build-info.h $(BUILD_TARGETS) $(TEST_TARGETS)
+       rm -vf *.o tests/*.o *.so *.dll benchmark-matmult build-info.h $(BUILD_TARGETS) $(TEST_TARGETS)
 
 #
 # Examples
@@ -489,3 +489,6 @@ tests/test-tokenizer-0-llama: tests/test-tokenizer-0-llama.cpp build-info.h ggml
 
 tests/test-tokenizer-1: tests/test-tokenizer-1.cpp build-info.h ggml.o llama.o common.o $(OBJS)
        $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
+
+tests/test-c.o: tests/test-c.c llama.h
+       $(CC) $(CFLAGS) -c $(filter-out %.h,$^) -o $@
index ca1f39d31b08184c53a27573f11ab7141a374c08..483210d7b89063081539943a9348872ad833a462 100644 (file)
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -37,3 +37,8 @@ llama_build_and_test_executable(test-grammar-parser.cpp)
 llama_build_and_test_executable(test-llama-grammar.cpp)
 llama_build_and_test_executable(test-grad0.cpp) # SLOW
 # llama_build_and_test_executable(test-opt.cpp) # SLOW
+
+# dummy executable - not installed
+get_filename_component(TEST_TARGET test-c.c NAME_WE)
+add_executable(${TEST_TARGET} test-c.c)
+target_link_libraries(${TEST_TARGET} PRIVATE llama)
diff --git a/tests/test-c.c b/tests/test-c.c
new file mode 100644 (file)
index 0000000..a050710
--- /dev/null
+++ b/tests/test-c.c
@@ -0,0 +1,3 @@
+#include "llama.h"
+
+int main(void) {}
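
The empty main is intentional: the test only has to prove that llama.h parses as strict C when built with the C compiler and the project CFLAGS (see the Makefile rule above). A hypothetical illustration (these declarations are not in llama.h) of the C++-only syntax such a test would catch, since a C compiler rejects both lines:

    void llama_example_defaults(int n_threads = 4);  /* default argument: C++ only */
    void llama_example_byref(float & out);           /* reference parameter: C++ only */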