- name: Build
  id: make_build
+ env:
+   LLAMA_FATAL_WARNINGS: 1
  run: |
    CC=gcc-8 make -j $(nproc)
  run: |
    mkdir build
    cd build
-   cmake ..
+   cmake .. -DLLAMA_FATAL_WARNINGS=ON
    cmake --build . --config Release -j $(nproc)
- name: Test
  run: |
    mkdir build
    cd build
-   cmake .. -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
+   cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
    cmake --build . --config ${{ matrix.build_type }} -j $(nproc)
- name: Test
- name: Build
  id: make_build
+ env:
+   LLAMA_FATAL_WARNINGS: 1
  run: |
    LLAMA_NO_METAL=1 make -j $(sysctl -n hw.logicalcpu)
    sysctl -a
    mkdir build
    cd build
-   cmake -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_METAL=OFF ..
+   cmake -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_METAL=OFF ..
    cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)
- name: Test
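
The CI changes above can be reproduced locally with either build path; a sketch mirroring the steps shown (compiler and job count are whatever the local toolchain provides):

    # make build, warnings treated as errors
    LLAMA_FATAL_WARNINGS=1 make -j $(nproc)

    # cmake build, warnings treated as errors
    mkdir build && cd build
    cmake .. -DLLAMA_FATAL_WARNINGS=ON
    cmake --build . --config Release -j $(nproc)
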
option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF)
option(LLAMA_GPROF "llama: enable gprof" OFF)
+# build
+option(LLAMA_FATAL_WARNINGS "llama: enable -Werror flag" OFF)
+
# sanitizers
option(LLAMA_SANITIZE_THREAD "llama: enable thread sanitizer" OFF)
option(LLAMA_SANITIZE_ADDRESS "llama: enable address sanitizer" OFF)
find_package(Threads REQUIRED)
include(CheckCXXCompilerFlag)
+if (LLAMA_FATAL_WARNINGS)
+    if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+        add_compile_options(-Werror)
+    elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+        add_compile_options(/WX)
+    endif()
+endif()
+
# enable libstdc++ assertions for debug builds
if (CMAKE_SYSTEM_NAME MATCHES "Linux")
add_compile_definitions($<$<CONFIG:Debug>:_GLIBCXX_ASSERTIONS>)
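
On MSVC the option maps to /WX rather than -Werror, per the branch above; a Windows configure sketch (the generator named here is only an example):

    cmake .. -G "Visual Studio 17 2022" -DLLAMA_FATAL_WARNINGS=ON
    cmake --build . --config Release
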
-Werror=implicit-function-declaration
MK_CXXFLAGS += $(WARN_FLAGS) -Wmissing-declarations -Wmissing-noreturn
+ifeq ($(LLAMA_FATAL_WARNINGS),1)
+ MK_CFLAGS += -Werror
+ MK_CXXFLAGS += -Werror
+endif
+
+ifeq ($(CC_IS_CLANG), 1)
+ # clang options
+ MK_CFLAGS += -Wunreachable-code-break -Wunreachable-code-return
+ MK_HOST_CXXFLAGS += -Wunreachable-code-break -Wunreachable-code-return -Wmissing-prototypes -Wextra-semi
+
+ ifneq '' '$(and $(CC_IS_LLVM_CLANG),$(filter 1,$(shell expr $(CC_VER) \>= 030800)))'
+ MK_CFLAGS += -Wdouble-promotion
+ endif
+ ifneq '' '$(and $(CC_IS_APPLE_CLANG),$(filter 1,$(shell expr $(CC_VER) \>= 070300)))'
+ MK_CFLAGS += -Wdouble-promotion
+ endif
+else
+ # gcc options
+ MK_CFLAGS += -Wdouble-promotion
+ MK_HOST_CXXFLAGS += -Wno-array-bounds
+
+ ifeq ($(shell expr $(CC_VER) \>= 070100), 1)
+ MK_HOST_CXXFLAGS += -Wno-format-truncation
+ endif
+ ifeq ($(shell expr $(CC_VER) \>= 080100), 1)
+ MK_HOST_CXXFLAGS += -Wextra-semi
+ endif
+endif
+
# this version of Apple ld64 is buggy
ifneq '' '$(findstring dyld-1015.7,$(shell $(CC) $(LDFLAGS) -Wl,-v 2>&1))'
MK_CPPFLAGS += -DHAVE_BUGGY_APPLE_LINKER
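
Usage note: the Makefile picks up LLAMA_FATAL_WARNINGS either from the environment (as the workflow env block does) or as a make command-line variable; a sketch:

    make LLAMA_FATAL_WARNINGS=1 -j $(nproc)
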
cd $sd/../
SRC=`pwd`
-CMAKE_EXTRA=""
+CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON"
if [ ! -z ${GG_BUILD_METAL} ]; then
CMAKE_EXTRA="${CMAKE_EXTRA} -DLLAMA_METAL_SHADER_DEBUG=ON"
#include <string>
#include <thread>
-static const size_t tensor_alignment = 32;
-
struct lora_info {
std::string filename;
float scale;
}
}
GGML_ASSERT(false && "tensor buffer type not supported by any backend");
+ return -1; // silence warning
}
#if 0
// MSL code
// TODO: move the contents here when ready
// for now it is easier to work in a separate file
-//static NSString * const msl_library_source = @"see metal.metal";
+// static NSString * const msl_library_source = @"see metal.metal";
// Here to assist with NSBundle Path Hack
@interface GGMLMetalClass : NSObject
const __m128 t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), \
_mm256_extractf128_ps(x[0], 1)); \
const __m128 t1 = _mm_hadd_ps(t0, t0); \
- res = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
+ res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); \
} while (0)
// TODO: is this optimal ?
x[i] = _mm_add_ps(x[i], x[offset+i]); \
} \
const __m128 t0 = _mm_hadd_ps(x[0], x[0]); \
- res = _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
+ res = (ggml_float) _mm_cvtss_f32(_mm_hadd_ps(t0, t0)); \
}
// TODO: is this optimal ?
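
The explicit (ggml_float) casts above are needed because res is a ggml_float (a double in ggml) while _mm_cvtss_f32 returns a float; the implicit float-to-double conversion can be reported by -Wdouble-promotion, which the Makefile now enables and which -Werror turns into a build failure. A minimal sketch of the pattern (identifiers are illustrative, not from the source):

    typedef double ggml_float;            // matches ggml's typedef

    static ggml_float reduce_demo(float s) {
        ggml_float a = s;                 // implicit float -> double promotion; -Wdouble-promotion reports this (at least with clang)
        ggml_float b = (ggml_float) s;    // explicit cast: no diagnostic
        return a + b;
    }
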
}
}
#else
+ GGML_UNUSED(numa_flag);
// TODO
#endif
}
}
struct ggml_tensor * ggml_set_name(struct ggml_tensor * tensor, const char * name) {
- strncpy(tensor->name, name, sizeof(tensor->name));
+ strncpy(tensor->name, name, sizeof(tensor->name) - 1);
tensor->name[sizeof(tensor->name) - 1] = '\0';
return tensor;
}
(*step) *= width;
}
- GGML_UNREACHABLE();
+ GGML_ASSERT(false && "line search failed");
+
+ return GGML_LINESEARCH_FAIL;
}
static enum ggml_opt_result ggml_opt_lbfgs(
step[0] = 1.0;
}
- GGML_UNREACHABLE();
+ GGML_ASSERT(false && "lbfgs failed");
+
+ return GGML_OPT_DID_NOT_CONVERGE;
}
struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {