-clang-analyzer-security.insecureAPI.DeprecatedOrUnsafeBufferHandling,
performance-*,
portability-*,
+ -portability-simd-intrinsics,
misc-*,
-misc-const-correctness,
-misc-non-private-member-variables-in-classes,
-misc-no-recursion,
+ -misc-use-anonymous-namespace,
FormatStyle: none
run: |
& 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version
+ - name: Install ccache
+ uses: hendrikmuhs/ccache-action@v1.2
+ with:
+ key: ${{ github.job }}
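+ # scope the cache key per job via the job id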
+
- name: Build
id: cmake_build
run: |
-# keep standard at C11 and C++11
+# keep standard at C11 and C++17
MK_CPPFLAGS = -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -DGGML_USE_CPU
MK_CFLAGS = -std=c11 -fPIC
-MK_CXXFLAGS = -std=c++11 -fPIC
-MK_NVCCFLAGS = -std=c++11
+MK_CXXFLAGS = -std=c++17 -fPIC
+MK_NVCCFLAGS = -std=c++17
ifdef LLAMA_NO_CCACHE
GGML_NO_CCACHE := 1
ifndef GGML_NO_AMX
MK_CPPFLAGS += -DGGML_USE_AMX
- OBJ_GGML_EXT += ggml/src/ggml-amx/ggml-amx.o ggml/src/ggml-amx/mmq.o
+ OBJ_GGML_EXT += ggml/src/ggml-cpu/amx/amx.o ggml/src/ggml-cpu/amx/mmq.o
endif
+# only necessary for the CPU backend files
+MK_CPPFLAGS += -Iggml/src/ggml-cpu
+
ifdef GGML_RPC
MK_CPPFLAGS += -DGGML_USE_RPC
OBJ_GGML_EXT += ggml/src/ggml-rpc.o
.unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
.unsafeFlags(["-fno-objc-arc"]),
.headerSearchPath("ggml/src"),
+ .headerSearchPath("ggml/src/ggml-cpu"),
// NOTE: NEW_LAPACK will require iOS version 16.4+
// We should consider adding this in the future when we drop support for iOS 14
// (ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
// .define("ACCELERATE_NEW_LAPACK"),
// .define("ACCELERATE_LAPACK_ILP64")
+ .define("GGML_USE_CPU"),
]
+
#if canImport(Darwin)
sources.append("ggml/src/ggml-common.h")
sources.append("ggml/src/ggml-metal/ggml-metal.m")
contentsOf: [
.define("GGML_USE_ACCELERATE"),
.define("GGML_USE_METAL"),
- .define("GGML_USE_CPU")
]
)
#endif
endif ()
target_include_directories(${TARGET} PUBLIC .)
-target_compile_features (${TARGET} PUBLIC cxx_std_11)
+target_compile_features (${TARGET} PUBLIC cxx_std_17)
target_link_libraries (${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama Threads::Threads)
std::u32string filename_utf32;
try {
+#if defined(__clang__)
+ // disable C++17 deprecation warning for std::codecvt_utf8
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> converter;
+
+#if defined(__clang__)
+# pragma clang diagnostic pop
+#endif
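+// (both std::wstring_convert and std::codecvt_utf8 are deprecated as of C++17 but still
+// available; the push/pop above keeps the warning suppression local to this declaration)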
+
filename_utf32 = converter.from_bytes(filename);
// If the reverse conversion mismatches, it means overlong UTF-8 sequences were used,
add_executable(${TARGET} batched-bench.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} batched.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} convert-llama2c-to-ggml.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} cvector-generator.cpp pca.hpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} embedding.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} eval-callback.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
set(TEST_TARGET test-eval-callback)
add_test(NAME ${TEST_TARGET}
add_executable(${TARGET} export-lora.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} gbnf-validator.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} gen-docs.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
target_link_libraries(${TARGET} PRIVATE sha256)
target_link_libraries(${TARGET} PRIVATE ggml ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} gguf-split.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} gguf.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE ggml ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} gritlm.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} imatrix.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} infill.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} llama-bench.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
target_include_directories(llava PUBLIC ../..)
target_include_directories(llava PUBLIC ../../common)
-target_compile_features(llava PRIVATE cxx_std_11)
+target_compile_features(llava PRIVATE cxx_std_17)
add_library(llava_static STATIC $<TARGET_OBJECTS:llava>)
if (BUILD_SHARED_LIBS)
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-llava-cli)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
set(TARGET llama-minicpmv-cli)
add_executable(${TARGET} minicpmv-cli.cpp)
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-minicpmv-cli)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} lookahead.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} lookup.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
set(TARGET llama-lookup-create)
add_executable(${TARGET} lookup-create.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
set(TARGET llama-lookup-merge)
add_executable(${TARGET} lookup-merge.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
set(TARGET llama-lookup-stats)
add_executable(${TARGET} lookup-stats.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
target_include_directories(${TARGET} PRIVATE ${_common_path})
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} main.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} parallel.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} passkey.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} perplexity.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT})
target_include_directories(${TARGET} PRIVATE ../../common)
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_include_directories(${TARGET} PRIVATE ../../common)
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} retrieval.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} run.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} save-load-state.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
endif()
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} simple-chat.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} simple.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} speculative-simple.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} speculative.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
add_executable(${TARGET} tokenize.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
set (GGML_METAL_STD "" CACHE STRING "ggml: metal standard version (-std flag)")
option(GGML_OPENMP "ggml: use OpenMP" ON)
option(GGML_RPC "ggml: use RPC" OFF)
-option(GGML_AMX "ggml: use AMX" OFF)
option(GGML_SYCL "ggml: use SYCL" OFF)
option(GGML_SYCL_F16 "ggml: use 16 bit floats for sycl calculations" OFF)
set (GGML_SYCL_TARGET "INTEL" CACHE STRING
+++ /dev/null
-#pragma once
-
-#include "ggml.h"
-#include "ggml-backend.h"
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// buffer_type API
-GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void);
-
-GGML_BACKEND_API bool ggml_backend_is_amx(ggml_backend_t backend);
-
-// backend API
-GGML_BACKEND_API ggml_backend_t ggml_backend_amx_init(void);
-
-GGML_BACKEND_API void ggml_backend_amx_set_n_threads(ggml_backend_t backend_amx, int n_threads);
-
-GGML_BACKEND_API ggml_backend_reg_t ggml_backend_amx_reg(void);
-
-#ifdef __cplusplus
-}
-#endif
if (${backend_id})
string(TOLOWER "ggml-${backend}" backend_target)
add_subdirectory(${backend_target})
- # check again in case the backend disabled itself
- # note that this should NOT be the normal behavior, in case of errors the backend should fail the build
- # however, currently it is necessary for AMX, since it is enabled by default on llama.cpp
- if (${backend_id})
- message(STATUS "Including ${backend} backend")
- if (NOT GGML_BACKEND_DL)
- string(TOUPPER "GGML_USE_${backend}" backend_use)
- target_compile_definitions(ggml PUBLIC ${backend_use})
- endif()
+ message(STATUS "Including ${backend} backend")
+ if (NOT GGML_BACKEND_DL)
+ string(TOUPPER "GGML_USE_${backend}" backend_use)
+ target_compile_definitions(ggml PUBLIC ${backend_use})
endif()
endif()
endfunction()
ggml_add_backend(CPU)
-ggml_add_backend(AMX)
ggml_add_backend(BLAS)
ggml_add_backend(CANN)
ggml_add_backend(CUDA)
foreach (target ggml-base ggml)
target_include_directories(${target} PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include> $<INSTALL_INTERFACE:include>)
- target_compile_features (${target} PRIVATE c_std_11) # don't bump
+ target_compile_features (${target} PRIVATE c_std_11 cxx_std_17) # don't bump
endforeach()
target_link_libraries(ggml-base PRIVATE Threads::Threads)
+++ /dev/null
-if (CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64" OR CMAKE_GENERATOR_PLATFORM_LWR MATCHES "^(x86_64|i686|amd64|x64|win32)$" OR
- (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_GENERATOR_PLATFORM_LWR AND
- CMAKE_SYSTEM_PROCESSOR MATCHES "^(x86_64|i686|AMD64)$") AND
- CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 11.0)
- message(STATUS "Using AMX")
-
- file(GLOB GGML_HEADERS_AMX "*.h")
- list(APPEND GGML_HEADERS_AMX "../../include/ggml-amx.h")
-
- file(GLOB GGML_SOURCES_AMX "*.cpp")
-
- ggml_add_backend_library(ggml-amx
- ${GGML_HEADERS_AMX}
- ${GGML_SOURCES_AMX}
- )
-
- # this is duplicated from the CPU backend, since the AMX backend also depends on the architecture flags
- # TODO: integrate AMX backend into the CPU backend
- if (MSVC)
- # instruction set detection for MSVC only
- if (GGML_NATIVE)
- # TODO: improve, should not reference files from the parent folder
- include(../ggml-cpu/cmake/FindSIMD.cmake)
- endif ()
- if (GGML_AVX512)
- list(APPEND ARCH_FLAGS /arch:AVX512)
- # MSVC has no compile-time flags enabling specific
- # AVX512 extensions, neither it defines the
- # macros corresponding to the extensions.
- # Do it manually.
- if (GGML_AVX512_VBMI)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>)
- endif()
- if (GGML_AVX512_VNNI)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
- endif()
- if (GGML_AVX512_BF16)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512BF16__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512BF16__>)
- endif()
- if (GGML_AMX_TILE)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_TILE__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_TILE__>)
- endif()
- if (GGML_AMX_INT8)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_INT8__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_INT8__>)
- endif()
- if (GGML_AMX_BF16)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_BF16__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_BF16__>)
- endif()
- elseif (GGML_AVX2)
- list(APPEND ARCH_FLAGS /arch:AVX2)
- elseif (GGML_AVX)
- list(APPEND ARCH_FLAGS /arch:AVX)
- endif()
- else()
- if (GGML_NATIVE)
- list(APPEND ARCH_FLAGS -march=native)
- endif()
- if (GGML_F16C)
- list(APPEND ARCH_FLAGS -mf16c)
- endif()
- if (GGML_FMA)
- list(APPEND ARCH_FLAGS -mfma)
- endif()
- if (GGML_AVX)
- list(APPEND ARCH_FLAGS -mavx)
- endif()
- if (GGML_AVX2)
- list(APPEND ARCH_FLAGS -mavx2)
- endif()
- if (GGML_AVX512)
- list(APPEND ARCH_FLAGS -mavx512f)
- list(APPEND ARCH_FLAGS -mavx512dq)
- list(APPEND ARCH_FLAGS -mavx512bw)
- endif()
- if (GGML_AVX512_VBMI)
- list(APPEND ARCH_FLAGS -mavx512vbmi)
- endif()
- if (GGML_AVX512_VNNI)
- list(APPEND ARCH_FLAGS -mavx512vnni)
- endif()
- if (GGML_AVX512_BF16)
- list(APPEND ARCH_FLAGS -mavx512bf16)
- endif()
- if (GGML_AMX_TILE)
- list(APPEND ARCH_FLAGS -mamx-tile)
- endif()
- if (GGML_AMX_INT8)
- list(APPEND ARCH_FLAGS -mamx-int8)
- endif()
- if (GGML_AMX_BF16)
- list(APPEND ARCH_FLAGS -mamx-bf16)
- endif()
- endif()
-
- target_compile_options(ggml-amx PRIVATE ${ARCH_FLAGS})
-else()
- set(GGML_AMX OFF PARENT_SCOPE)
- message(WARNING "AMX requires x86 and gcc version > 11.0. Turning off GGML_AMX.")
-endif()
+++ /dev/null
-#pragma once
-
-#include "ggml.h"
-// hack until AMX is moved into the CPU backend
-#include "../ggml-cpu/ggml-cpu-impl.h" // <immintrin.h>
-
-#include <algorithm>
-#include <memory>
-#include <type_traits>
-
-#if defined(_OPENMP)
-#include <omp.h>
-#endif
-
-#define TILE_M 16
-#define TILE_N 16
-#define TILE_K 32
-#define VNNI_BLK 4
-
-#define AMX_BLK_SIZE 32
-
-#define TMM0 0
-#define TMM1 1
-#define TMM2 2
-#define TMM3 3
-#define TMM4 4
-#define TMM5 5
-#define TMM6 6
-#define TMM7 7
-
-// parallel routines
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
-inline T div_up(T x, T y) { return (x + y - 1) / y; }
-
-template <typename T>
-inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) {
-#if 0
- // onednn partition pattern
- T& n_my = n_end;
- if (nth <= 1 || n == 0) {
- n_start = 0;
- n_my = n;
- } else {
- T n1 = div_up(n, nth);
- T n2 = n1 - 1;
- T T1 = n - n2 * nth;
- n_my = ith < T1 ? n1 : n2;
- n_start = ith <= T1 ? ith*n1 : T1 * n1 + (ith - T1) * n2;
- }
- n_end += n_start;
-#else
- // pytorch aten partition pattern
- T n_my = div_up(n, nth);
- n_start = ith * n_my;
- n_end = std::min(n_start + n_my, n);
-#endif
-}
-
-template <typename func_t>
-inline void parallel_for(int nth, int n, const func_t& f) {
-#if defined(_OPENMP)
-#pragma omp parallel num_threads(nth)
-{
- //int nth = omp_get_num_threads();
- int ith = omp_get_thread_num();
- int tbegin, tend;
- balance211(n, nth, ith, tbegin, tend);
- f(tbegin, tend);
-}
-#else
- f(0, n);
-
- GGML_UNUSED(nth);
-#endif
-}
-
-// quantized types that have AMX support
-inline bool qtype_has_amx_kernels(const enum ggml_type type) {
- // TODO: fix padding for vnni format
- return (type == GGML_TYPE_Q4_0) ||
- (type == GGML_TYPE_Q4_1);
- //(type == GGML_TYPE_Q8_0) ||
- //(type == GGML_TYPE_Q4_K) ||
- //(type == GGML_TYPE_Q5_K) ||
- //(type == GGML_TYPE_Q6_K) ||
- //(type == GGML_TYPE_IQ4_XS);
-}
-
-// ggml backend context
-struct ggml_backend_amx_context {
- int n_threads = GGML_DEFAULT_N_THREADS;
- std::unique_ptr<char[]> work_data;
- size_t work_size = 0;
-};
+++ /dev/null
-#include "ggml-amx.h"
-#include "ggml-amx/common.h"
-#include "ggml-amx/mmq.h"
-#include "ggml-backend-impl.h"
-#include "ggml-impl.h"
-
-#if defined(__gnu_linux__)
-#include <sys/syscall.h>
-#include <unistd.h>
-#endif
-
-#include <cstdlib>
-#include <cstring>
-#include <memory>
-
-#if defined(__AMX_INT8__)
-
-// AMX buffer interface
-static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) {
- free(buffer->context);
-}
-
-static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) {
- return (void *)(buffer->context);
-}
-
-static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
- memset((char *)tensor->data + offset, value, size);
-
- GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
- if (qtype_has_amx_kernels(tensor->type)) {
- ggml_backend_amx_convert_weight(tensor, data, offset, size);
- } else {
- memcpy((char *)tensor->data + offset, data, size);
- }
-
- GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
- GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
- memcpy(data, (const char *)tensor->data + offset, size);
-
- GGML_UNUSED(buffer);
-}
-
-static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
- if (ggml_backend_buffer_is_host(src->buffer)) {
- if (qtype_has_amx_kernels(src->type)) {
- ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_backend_amx_get_alloc_size(dst));
- } else {
- memcpy(dst->data, src->data, ggml_nbytes(src));
- }
- return true;
- }
- return false;
-
- GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
- memset(buffer->context, value, buffer->size);
-}
-
-static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = {
- /* .free_buffer = */ ggml_backend_amx_buffer_free_buffer,
- /* .get_base = */ ggml_backend_amx_buffer_get_base,
- /* .init_tensor = */ NULL, // no initialization required
- /* .memset_tensor = */ ggml_backend_amx_buffer_memset_tensor,
- /* .set_tensor = */ ggml_backend_amx_buffer_set_tensor,
- /* .get_tensor = */ ggml_backend_amx_buffer_get_tensor,
- /* .cpy_tensor = */ ggml_backend_amx_buffer_cpy_tensor,
- /* .clear = */ ggml_backend_amx_buffer_clear,
- /* .reset = */ NULL,
-};
-
-static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
- return "AMX";
-
- GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
- void * data = aligned_alloc(TENSOR_ALIGNMENT, size);
- if (data == NULL) {
- fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
- return NULL;
- }
-
- return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size);
-}
-
-static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
- return TENSOR_ALIGNMENT;
-
- GGML_UNUSED(buft);
-}
-
-static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor* tensor) {
- return ggml_backend_amx_get_alloc_size(tensor);
-
- GGML_UNUSED(buft);
-}
-
-static bool ggml_backend_amx_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
- return false;
-
- GGML_UNUSED(buft);
-}
-
-ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() {
- static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = {
- /* .iface = */ {
- /* .get_name = */ ggml_backend_amx_buffer_type_get_name,
- /* .alloc_buffer = */ ggml_backend_amx_buffer_type_alloc_buffer,
- /* .get_alignment = */ ggml_backend_amx_buffer_type_get_alignment,
- /* .get_max_size = */ NULL, // defaults to SIZE_MAX
- /* .get_alloc_size = */ ggml_backend_amx_buffer_type_get_alloc_size,
- /* .is_host = */ ggml_backend_amx_buffer_type_is_host,
- },
- /* .device = */ ggml_backend_reg_dev_get(ggml_backend_amx_reg(), 0),
- /* .context = */ NULL,
- };
-
- return &ggml_backend_buffer_type_amx;
-}
-
-// backend interface
-
-static const char * ggml_backend_amx_name(ggml_backend_t backend) {
- return "AMX";
-
- GGML_UNUSED(backend);
-}
-
-static void ggml_backend_amx_free(ggml_backend_t backend) {
- ggml_backend_amx_context * ctx = (ggml_backend_amx_context *)backend->context;
- delete ctx;
- delete backend;
-}
-
-static enum ggml_status ggml_backend_amx_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
- ggml_backend_amx_context * ctx = (ggml_backend_amx_context *)backend->context;
-
- for (int i = 0; i < cgraph->n_nodes; i++) {
- struct ggml_tensor * node = cgraph->nodes[i];
-
- switch (node->op) {
- case GGML_OP_MUL_MAT:
- ggml_backend_amx_mul_mat(ctx, node);
- break;
-
- case GGML_OP_NONE:
- case GGML_OP_RESHAPE:
- case GGML_OP_VIEW:
- case GGML_OP_PERMUTE:
- case GGML_OP_TRANSPOSE:
- break;
-
- default:
- fprintf(stderr, "%s: unsupported op %s\n", __func__, ggml_op_desc(node));
- GGML_ASSERT(false);
- }
- }
-
- return GGML_STATUS_SUCCESS;
-
- GGML_UNUSED(backend);
-}
-
-static struct ggml_backend_i ggml_backend_amx_i = {
- /* .get_name = */ ggml_backend_amx_name,
- /* .free = */ ggml_backend_amx_free,
- /* .set_tensor_async = */ NULL,
- /* .get_tensor_async = */ NULL,
- /* .cpy_tensor_async = */ NULL,
- /* .synchronize = */ NULL,
- /* .graph_plan_create = */ NULL,
- /* .graph_plan_free = */ NULL,
- /* .graph_plan_update = */ NULL,
- /* .graph_plan_compute = */ NULL,
- /* .graph_compute = */ ggml_backend_amx_graph_compute,
- /* .event_record = */ NULL,
- /* .event_wait = */ NULL,
-};
-
-static ggml_guid_t ggml_backend_amx_guid() {
- static ggml_guid guid = { 0x13, 0xb8, 0xa4, 0xc4, 0xba, 0xfe, 0x51, 0x67, 0x87, 0x44, 0x55, 0x15, 0xb2, 0x35, 0x62, 0x3e };
- return &guid;
-}
-
-#define ARCH_GET_XCOMP_PERM 0x1022
-#define ARCH_REQ_XCOMP_PERM 0x1023
-#define XFEATURE_XTILECFG 17
-#define XFEATURE_XTILEDATA 18
-
-static bool ggml_amx_init() {
-#if defined(__gnu_linux__)
- if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) {
- fprintf(stderr, "AMX is not ready to be used!\n");
- return false;
- }
- return true;
-#elif defined(_WIN32)
- return true;
-#endif
-}
-
-ggml_backend_t ggml_backend_amx_init() {
-
- // invoke a Linux system call to request access to AMX features
- ggml_amx_init();
-
- // backend context
- ggml_backend_amx_context * ctx = new ggml_backend_amx_context;
-
- // ggml amx backend
- ggml_backend_t backend = new ggml_backend {
- /* .guid = */ ggml_backend_amx_guid(),
- /* .interface = */ ggml_backend_amx_i,
- /* .device = */ ggml_backend_reg_dev_get(ggml_backend_amx_reg(), 0),
- /* .context = */ ctx,
- };
-
- return backend;
-}
-
-bool ggml_backend_is_amx(ggml_backend_t backend) {
- return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_amx_guid());
-}
-
-void ggml_backend_amx_set_n_threads(ggml_backend_t backend_amx, int n_threads) {
- GGML_ASSERT(ggml_backend_is_amx(backend_amx));
-
- ggml_backend_amx_context * ctx = (ggml_backend_amx_context *)backend_amx->context;
- ctx->n_threads = n_threads;
-}
-
-// device interface
-
-static const char * ggml_backend_amx_device_get_name(ggml_backend_dev_t dev) {
- return "AMX";
-
- GGML_UNUSED(dev);
-}
-
-static const char * ggml_backend_amx_device_get_description(ggml_backend_dev_t dev) {
- return "Intel Advanced Matrix Extensions";
-
- GGML_UNUSED(dev);
-}
-
-static void ggml_backend_amx_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
- // TODO
- *free = 0;
- *total = 0;
-
- GGML_UNUSED(dev);
-}
-
-static enum ggml_backend_dev_type ggml_backend_amx_device_get_type(ggml_backend_dev_t dev) {
- return GGML_BACKEND_DEVICE_TYPE_ACCEL;
-
- GGML_UNUSED(dev);
-}
-
-static void ggml_backend_amx_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
- props->name = ggml_backend_amx_device_get_name(dev);
- props->description = ggml_backend_amx_device_get_description(dev);
- props->type = ggml_backend_amx_device_get_type(dev);
- ggml_backend_amx_device_get_memory(dev, &props->memory_free, &props->memory_total);
-
- // `buffer_from_host_ptr` is intended to be used in mmap, when memory layout unchanged
- props->caps = {
- /* .async = */ false,
- /* .host_buffer = */ false,
- /* .buffer_from_host_ptr = */ false,
- /* .events = */ false,
- };
-}
-
-static ggml_backend_t ggml_backend_amx_device_init(ggml_backend_dev_t dev, const char * params) {
- return ggml_backend_amx_init();
-
- GGML_UNUSED(dev);
- GGML_UNUSED(params);
-}
-
-static ggml_backend_buffer_type_t ggml_backend_amx_device_get_buffer_type(ggml_backend_dev_t dev) {
- return ggml_backend_amx_buffer_type();
-
- GGML_UNUSED(dev);
-}
-
-static bool ggml_backend_amx_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
-
- // handle only 2d gemm for now
- auto is_contiguous_2d = [](const struct ggml_tensor * t) {
- return ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1;
- };
-
- switch (op->op) {
- case GGML_OP_NONE:
- case GGML_OP_RESHAPE:
- case GGML_OP_VIEW:
- case GGML_OP_PERMUTE:
- case GGML_OP_TRANSPOSE:
- return true;
-
- case GGML_OP_MUL_MAT: {
- const struct ggml_tensor * src0 = op->src[0];
- const struct ggml_tensor * src1 = op->src[1];
-
- const enum ggml_type type = src0->type;
- const int64_t ne0 = op->ne[0];
-
- // amx kernels enables for Q4_0, Q4_1, Q8_0, F16
- // Q4_K, Q5_K, Q6_K, IQ4_XS enabled for QK_K = 256
- bool has_amx_kernels = qtype_has_amx_kernels(type) || (type == GGML_TYPE_F16);
-
- bool can_use_amx =
- is_contiguous_2d(src0) && // src0 must be contiguous
- is_contiguous_2d(src1) && // src1 must be contiguous
- src1->type == GGML_TYPE_F32 && // src1 must be float32
- has_amx_kernels && // with amx kernel impls
- ne0 % (TILE_N * 2) == 0; // out_features is 32x
-
- return can_use_amx;
- }
- default:
- return false;
- }
-
- GGML_UNUSED(dev);
-}
-
-static bool ggml_backend_amx_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
- return buft->iface.get_name == ggml_backend_amx_buffer_type_get_name;
-
- GGML_UNUSED(dev);
-}
-
-static const struct ggml_backend_device_i ggml_backend_amx_device_i = {
- /* .get_name = */ ggml_backend_amx_device_get_name,
- /* .get_description = */ ggml_backend_amx_device_get_description,
- /* .get_memory = */ ggml_backend_amx_device_get_memory,
- /* .get_type = */ ggml_backend_amx_device_get_type,
- /* .get_props = */ ggml_backend_amx_device_get_props,
- /* .init_backend = */ ggml_backend_amx_device_init,
- /* .get_buffer_type = */ ggml_backend_amx_device_get_buffer_type,
- /* .get_host_buffer_type = */ NULL,
- /* .buffer_from_host_ptr = */ NULL,
- /* .supports_op = */ ggml_backend_amx_device_supports_op,
- /* .supports_buft = */ ggml_backend_amx_device_supports_buft,
- /* .offload_op = */ NULL,
- /* .event_new = */ NULL,
- /* .event_free = */ NULL,
- /* .event_synchronize = */ NULL,
-};
-
-// backend reg interface
-
-static const char * ggml_backend_amx_reg_get_name(ggml_backend_reg_t reg) {
- return "AMX";
-
- GGML_UNUSED(reg);
-}
-
-static size_t ggml_backend_amx_reg_get_device_count(ggml_backend_reg_t reg) {
- return 1;
-
- GGML_UNUSED(reg);
-}
-
-static ggml_backend_dev_t ggml_backend_amx_reg_get_device(ggml_backend_reg_t reg, size_t index) {
- GGML_ASSERT(index == 0);
-
- static ggml_backend_device ggml_backend_amx_device = {
- /* .iface = */ ggml_backend_amx_device_i,
- /* .reg = */ reg,
- /* .context = */ nullptr,
- };
-
- return &ggml_backend_amx_device;
-
- GGML_UNUSED(reg);
- GGML_UNUSED(index);
-}
-
-static void * ggml_backend_amx_get_proc_address(ggml_backend_reg_t reg, const char * name) {
- if (std::strcmp(name, "ggml_backend_set_n_threads") == 0) {
- return (void *)ggml_backend_amx_set_n_threads;
- }
- return NULL;
-
- GGML_UNUSED(reg);
- GGML_UNUSED(name);
-}
-
-static const struct ggml_backend_reg_i ggml_backend_amx_reg_i = {
- /* .get_name = */ ggml_backend_amx_reg_get_name,
- /* .get_device_count = */ ggml_backend_amx_reg_get_device_count,
- /* .get_device = */ ggml_backend_amx_reg_get_device,
- /* .get_proc_address = */ ggml_backend_amx_get_proc_address,
-};
-
-ggml_backend_reg_t ggml_backend_amx_reg(void) {
- static struct ggml_backend_reg ggml_backend_amx_reg = {
- /* .api_version = */ GGML_BACKEND_API_VERSION,
- /* .iface = */ ggml_backend_amx_reg_i,
- /* .context = */ NULL,
- };
-
- return &ggml_backend_amx_reg;
-}
-
-#else // if defined(__AMX_INT8__)
-
-ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void) {
- return nullptr;
-}
-
-bool ggml_backend_is_amx(ggml_backend_t backend) {
- GGML_UNUSED(backend);
- return false;
-}
-
-ggml_backend_t ggml_backend_amx_init(void) {
- fprintf(stderr, "GGML is not compiled with AMX support!\n");
- return nullptr;
-}
-
-void ggml_backend_amx_set_n_threads(ggml_backend_t backend_amx, int n_threads) {
- fprintf(stderr, "GGML is not compiled with AMX support!\n");
-
- GGML_UNUSED(backend_amx);
- GGML_UNUSED(n_threads);
-}
-
-ggml_backend_reg_t ggml_backend_amx_reg(void) {
- return nullptr;
-}
-
-#endif
-
-GGML_BACKEND_DL_IMPL(ggml_backend_amx_reg)
+++ /dev/null
-
-#if defined(__GNUC__)
-#pragma GCC diagnostic ignored "-Wpedantic"
-#pragma GCC diagnostic ignored "-Wunused-local-typedefs"
-#endif
-
-#include "mmq.h"
-#include "ggml-impl.h"
-#include "ggml-quants.h"
-#include <algorithm>
-#include <type_traits>
-
-#if defined(__gnu_linux__)
-#include <sys/syscall.h>
-#include <unistd.h>
-#endif
-
-#if defined(_OPENMP)
-#include <omp.h>
-#endif
-
-#if (defined(_WIN32) || defined(_WIN64))
-#define RESTRICT __restrict
-#else
-#define RESTRICT __restrict__
-#endif
-
-#if (defined(_WIN32) || defined(_WIN64))
-#define ALWAYS_INLINE __forceinline
-#elif __has_attribute(always_inline) || defined(__GNUC__)
-#define ALWAYS_INLINE __attribute__((__always_inline__)) inline
-#else
-#define ALWAYS_INLINE inline
-#endif
-
-#if defined(__AMX_INT8__)
-
-namespace {
-
-// Forced unrolling
-template <int n>
-struct Unroll {
- template <typename Func, typename... Args>
- ALWAYS_INLINE void operator()(const Func& f, Args... args) const {
- Unroll<n - 1>{}(f, args...);
- f(std::integral_constant<int, n - 1>{}, args...);
- }
-};
-
-template <>
-struct Unroll<1> {
- template <typename Func, typename... Args>
- ALWAYS_INLINE void operator()(const Func& f, Args... args) const {
- f(std::integral_constant<int, 0>{}, args...);
- }
-};
-
-// type traits
-template <typename T> struct PackedTypes {};
-template <> struct PackedTypes<block_q4_0> { using type = int8_t; };
-template <> struct PackedTypes<block_q4_1> { using type = uint8_t; };
-template <> struct PackedTypes<block_q8_0> { using type = int8_t; };
-template <typename T> using packed_B_type = typename PackedTypes<T>::type;
-
-template <typename T>
-struct do_compensate : std::integral_constant<bool,
- std::is_same<T, block_q8_0>::value> {};
-
-template <typename T>
-struct do_unpack : std::integral_constant<bool,
- std::is_same<T, block_q4_0>::value ||
- std::is_same<T, block_q4_1>::value> {};
-
-template <typename T>
-struct is_type_qkk : std::integral_constant<bool,
- std::is_same<T, block_q4_K>::value ||
- std::is_same<T, block_q5_K>::value ||
- std::is_same<T, block_q6_K>::value ||
- std::is_same<T, block_iq4_xs>::value> {};
-
-#define GGML_DISPATCH_FLOATING_TYPES(TYPE, ...) \
- [&] { \
- switch (TYPE) { \
- case GGML_TYPE_F16: { \
- using type = ggml_fp16_t; \
- constexpr int blck_size = 16; \
- return __VA_ARGS__(); \
- } \
- case GGML_TYPE_BF16: { \
- using type = ggml_bf16_t; \
- constexpr int blck_size = 32; \
- return __VA_ARGS__(); \
- } \
- default: \
- fprintf(stderr, "Unsupported floating data type\n"); \
- } \
- }()
-
-#define GGML_DISPATCH_QTYPES(QT, ...) \
- [&] { \
- switch (QT) { \
- case GGML_TYPE_Q4_0: { \
- using type = block_q4_0; \
- using vec_dot_type = block_q8_0; \
- constexpr int blck_size = QK4_0; \
- return __VA_ARGS__(); \
- } \
- case GGML_TYPE_Q4_1: { \
- using type = block_q4_1; \
- using vec_dot_type = block_q8_1; \
- constexpr int blck_size = QK4_1; \
- return __VA_ARGS__(); \
- } \
- case GGML_TYPE_Q8_0: { \
- using type = block_q8_0; \
- using vec_dot_type = block_q8_0; \
- constexpr int blck_size = QK8_0; \
- return __VA_ARGS__(); \
- } \
- case GGML_TYPE_Q4_K: { \
- using type = block_q4_K; \
- using vec_dot_type = block_q8_K; \
- constexpr int blck_size = QK_K; \
- return __VA_ARGS__(); \
- } \
- case GGML_TYPE_Q5_K: { \
- using type = block_q5_K; \
- using vec_dot_type = block_q8_K; \
- constexpr int blck_size = QK_K; \
- return __VA_ARGS__(); \
- } \
- case GGML_TYPE_Q6_K: { \
- using type = block_q6_K; \
- using vec_dot_type = block_q8_K; \
- constexpr int blck_size = QK_K; \
- return __VA_ARGS__(); \
- } \
- case GGML_TYPE_IQ4_XS: { \
- using type = block_iq4_xs; \
- using vec_dot_type = block_q8_K; \
- constexpr int blck_size = QK_K; \
- return __VA_ARGS__(); \
- } \
- default: \
- fprintf(stderr, "Unsupported quantized data type: %d\n", int(TYPE)); \
- } \
- }()
-
-#define GGML_DISPATCH_BOOL(BOOL_V, BOOL_NAME, ...) \
- [&] { \
- if (BOOL_V) { \
- constexpr bool BOOL_NAME = true; \
- return __VA_ARGS__(); \
- } else { \
- constexpr bool BOOL_NAME = false; \
- return __VA_ARGS__(); \
- } \
- }()
-
-// define amx tile config data structure
-struct tile_config_t{
- uint8_t palette_id = 0;
- uint8_t start_row = 0;
- uint8_t reserved_0[14] = {0};
- uint16_t colsb[16] = {0};
- uint8_t rows[16] = {0};
-};
-
-// Notes: amx tile config
-//
-// Typically, TMUL calculates A and B of size 16 x 64 containing INT8 values,
-// and accumulate the result to a 16 x 16 matrix C containing INT32 values,
-//
-// As many GGUF quantized types as `block_size` of 32, so a 16-16-32 config is used
-// instead of the normally used 16-16-64 config.
-//
-// Block A: {16, 32}, dtype = int8_t
-// Block B: {16, 32}, dtype = uint8_t/int8_t
-// Block C: {16, 16}, dtype = int32_t
-//
-// Block B needs to be prepacked to vnni format before feeding into TMUL:
-// packed_B: from {n, k} to {k/vnni_blk, n, vnni_blck}, viewed in 2d, we get {8, 64}
-//
-// Therefore, we get tileconfig:
-// A B C
-// rows 16 8 16
-// colsb 32 64 16
-//
-// For tile distribution, follow a 2-2-4 pattern, e.g. A used TMM2-TMM3, B used TMM0-TMM1,
-// C used TMM4-TMM7:
-// B TMM0 B TMM1
-// A TMM2 C TMM4 C TMM6
-// A TMM3 C TMM5 C TMM7
-//
-// Each `amx` kernel handles 4 blocks at a time: 2MB * 2NB, when m < 2 * BLOCK_M, unpack A
-// will be needed.
-//
-// Here another commonly used pattern 1-3-3 is skipped, as it is mostly used when m <=16;
-// and the sinlge batch gemm (m=1) has a special fast path with `avx512-vnni`.
-//
-// ref: https://www.intel.com/content/www/us/en/developer/articles/code-sample/
-// advanced-matrix-extensions-intrinsics-functions.html
-//
-
-#define TC_CONFIG_TILE(i, r, cb) tc.rows[i] = r; tc.colsb[i] = cb
-void ggml_tile_config_init(void) {
- static thread_local bool is_first_time = true;
-
- if (!is_first_time) {
- return;
- }
-
- static thread_local tile_config_t tc;
- tile_config_t current_tc;
- _tile_storeconfig(&current_tc);
-
- // load only when config changes
- if (tc.palette_id == 0 || (memcmp(&current_tc.colsb, &tc.colsb, sizeof(uint16_t) * 8) != 0 &&
- memcmp(&current_tc.rows, &tc.rows, sizeof(uint8_t) * 8) != 0)) {
- tc.palette_id = 1;
- tc.start_row = 0;
- TC_CONFIG_TILE(TMM0, 8, 64);
- TC_CONFIG_TILE(TMM1, 8, 64);
- TC_CONFIG_TILE(TMM2, 16, 32);
- TC_CONFIG_TILE(TMM3, 16, 32);
- TC_CONFIG_TILE(TMM4, 16, 64);
- TC_CONFIG_TILE(TMM5, 16, 64);
- TC_CONFIG_TILE(TMM6, 16, 64);
- TC_CONFIG_TILE(TMM7, 16, 64);
- _tile_loadconfig(&tc);
- }
-
- is_first_time = false;
-}
-
-// we need an extra 16 * 4B (TILE_N * int32_t) for each NB/KB block for compensation.
-// See the notes `s8s8 igemm compensation in avx512-vnni` for detail.
-template <typename TB>
-int get_tile_size() {
- int tile_size = TILE_N * sizeof(TB);
- if (do_compensate<TB>::value) {
- tile_size += TILE_N * sizeof(int32_t);
- }
- if (std::is_same<TB, block_q4_K>::value ||
- std::is_same<TB, block_q5_K>::value) {
- tile_size += TILE_N * 4;
- }
- if (std::is_same<TB, block_iq4_xs>::value) {
- tile_size += TILE_N * 2;
- }
- return tile_size;
-}
-
-template <typename TB, int BLOCK_K>
-int get_row_size(int K) {
- int KB = K / BLOCK_K;
- int row_size = KB * sizeof(TB);
- if (do_compensate<TB>::value) {
- row_size += KB * sizeof(int32_t);
- }
- if (std::is_same<TB, block_q4_K>::value ||
- std::is_same<TB, block_q5_K>::value) {
- row_size += KB * 4;
- }
- if (std::is_same<TB, block_iq4_xs>::value) {
- row_size += KB * 2;
- }
- return row_size;
-}
-
-// vectorized dtype conversion
-inline float FP16_TO_FP32(ggml_half val) {
- __m256i v = _mm256_setr_epi16(
- val, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
- __m512 o = _mm512_cvtph_ps(v);
- return _mm512_cvtss_f32(o);
-}
-
-inline __m512 FP16_TO_FP32_VEC(ggml_half val) {
- __m256i v = _mm256_set1_epi16(val);
- return _mm512_cvtph_ps(v);
-}
-
-// horizontal reduce
-inline float _mm512_reduce_max_ps(const __m512 x) {
- __m512 v = x;
- __m512 v1 = _mm512_shuffle_f32x4(v, v, 0x4E);
- v = _mm512_max_ps(v, v1);
- v1 = _mm512_shuffle_f32x4(v, v, 0xB1);
- v = _mm512_max_ps(v, v1);
- v1 = _mm512_shuffle_ps(v, v, 0x4E);
- v = _mm512_max_ps(v, v1);
- v1 = _mm512_shuffle_ps(v, v, 0xB1);
- v = _mm512_max_ps(v, v1);
- return _mm512_cvtss_f32(v);
-}
-
-// transpose utils
-#define SHUFFLE_EPI32(a, b, mask) \
- _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b), mask))
-inline void transpose_8x8_32bit(__m256i * v, __m256i * v1) {
- // unpacking and 32-bit elements
- v1[0] = _mm256_unpacklo_epi32(v[0], v[1]);
- v1[1] = _mm256_unpackhi_epi32(v[0], v[1]);
- v1[2] = _mm256_unpacklo_epi32(v[2], v[3]);
- v1[3] = _mm256_unpackhi_epi32(v[2], v[3]);
- v1[4] = _mm256_unpacklo_epi32(v[4], v[5]);
- v1[5] = _mm256_unpackhi_epi32(v[4], v[5]);
- v1[6] = _mm256_unpacklo_epi32(v[6], v[7]);
- v1[7] = _mm256_unpackhi_epi32(v[6], v[7]);
-
- // shuffling the 32-bit elements
- v[0] = SHUFFLE_EPI32(v1[0], v1[2], 0x44);
- v[1] = SHUFFLE_EPI32(v1[0], v1[2], 0xee);
- v[2] = SHUFFLE_EPI32(v1[4], v1[6], 0x44);
- v[3] = SHUFFLE_EPI32(v1[4], v1[6], 0xee);
- v[4] = SHUFFLE_EPI32(v1[1], v1[3], 0x44);
- v[5] = SHUFFLE_EPI32(v1[1], v1[3], 0xee);
- v[6] = SHUFFLE_EPI32(v1[5], v1[7], 0x44);
- v[7] = SHUFFLE_EPI32(v1[5], v1[7], 0xee);
-
- // shuffling 128-bit elements
- v1[0] = _mm256_permute2f128_si256(v[2], v[0], 0x02);
- v1[1] = _mm256_permute2f128_si256(v[3], v[1], 0x02);
- v1[2] = _mm256_permute2f128_si256(v[6], v[4], 0x02);
- v1[3] = _mm256_permute2f128_si256(v[7], v[5], 0x02);
- v1[4] = _mm256_permute2f128_si256(v[2], v[0], 0x13);
- v1[5] = _mm256_permute2f128_si256(v[3], v[1], 0x13);
- v1[6] = _mm256_permute2f128_si256(v[6], v[4], 0x13);
- v1[7] = _mm256_permute2f128_si256(v[7], v[5], 0x13);
-}
-
-inline void transpose_16x4_32bit(__m512i * r, __m512i * d) {
-
- static const __m512i index1 = _mm512_set_epi32(
- 0x0f, 0x0b, 0x07, 0x03,
- 0x0e, 0x0a, 0x06, 0x02,
- 0x0d, 0x09, 0x05, 0x01,
- 0x0c, 0x08, 0x04, 0x00);
-
- d[0] = _mm512_permutexvar_epi32(index1, r[0]);
- d[1] = _mm512_permutexvar_epi32(index1, r[1]);
- d[2] = _mm512_permutexvar_epi32(index1, r[2]);
- d[3] = _mm512_permutexvar_epi32(index1, r[3]);
-
- r[0] = _mm512_shuffle_i32x4(d[0], d[1], 0x44);
- r[1] = _mm512_shuffle_i32x4(d[0], d[1], 0xee);
- r[2] = _mm512_shuffle_i32x4(d[2], d[3], 0x44);
- r[3] = _mm512_shuffle_i32x4(d[2], d[3], 0xee);
-
- d[0] = _mm512_shuffle_i32x4(r[0], r[2], 0x88);
- d[1] = _mm512_shuffle_i32x4(r[0], r[2], 0xdd);
- d[2] = _mm512_shuffle_i32x4(r[1], r[3], 0x88);
- d[3] = _mm512_shuffle_i32x4(r[1], r[3], 0xdd);
-}
-
-inline void transpose_16x16_32bit(__m512i * v) {
- __m512i v1[16];
- v1[0] = _mm512_unpacklo_epi32(v[0], v[1]);
- v1[1] = _mm512_unpackhi_epi32(v[0], v[1]);
- v1[2] = _mm512_unpacklo_epi32(v[2], v[3]);
- v1[3] = _mm512_unpackhi_epi32(v[2], v[3]);
- v1[4] = _mm512_unpacklo_epi32(v[4], v[5]);
- v1[5] = _mm512_unpackhi_epi32(v[4], v[5]);
- v1[6] = _mm512_unpacklo_epi32(v[6], v[7]);
- v1[7] = _mm512_unpackhi_epi32(v[6], v[7]);
- v1[8] = _mm512_unpacklo_epi32(v[8], v[9]);
- v1[9] = _mm512_unpackhi_epi32(v[8], v[9]);
- v1[10] = _mm512_unpacklo_epi32(v[10], v[11]);
- v1[11] = _mm512_unpackhi_epi32(v[10], v[11]);
- v1[12] = _mm512_unpacklo_epi32(v[12], v[13]);
- v1[13] = _mm512_unpackhi_epi32(v[12], v[13]);
- v1[14] = _mm512_unpacklo_epi32(v[14], v[15]);
- v1[15] = _mm512_unpackhi_epi32(v[14], v[15]);
-
- v[0] = _mm512_unpacklo_epi64(v1[0], v1[2]);
- v[1] = _mm512_unpackhi_epi64(v1[0], v1[2]);
- v[2] = _mm512_unpacklo_epi64(v1[1], v1[3]);
- v[3] = _mm512_unpackhi_epi64(v1[1], v1[3]);
- v[4] = _mm512_unpacklo_epi64(v1[4], v1[6]);
- v[5] = _mm512_unpackhi_epi64(v1[4], v1[6]);
- v[6] = _mm512_unpacklo_epi64(v1[5], v1[7]);
- v[7] = _mm512_unpackhi_epi64(v1[5], v1[7]);
- v[8] = _mm512_unpacklo_epi64(v1[8], v1[10]);
- v[9] = _mm512_unpackhi_epi64(v1[8], v1[10]);
- v[10] = _mm512_unpacklo_epi64(v1[9], v1[11]);
- v[11] = _mm512_unpackhi_epi64(v1[9], v1[11]);
- v[12] = _mm512_unpacklo_epi64(v1[12], v1[14]);
- v[13] = _mm512_unpackhi_epi64(v1[12], v1[14]);
- v[14] = _mm512_unpacklo_epi64(v1[13], v1[15]);
- v[15] = _mm512_unpackhi_epi64(v1[13], v1[15]);
-
- v1[0] = _mm512_shuffle_i32x4(v[0], v[4], 0x88);
- v1[1] = _mm512_shuffle_i32x4(v[1], v[5], 0x88);
- v1[2] = _mm512_shuffle_i32x4(v[2], v[6], 0x88);
- v1[3] = _mm512_shuffle_i32x4(v[3], v[7], 0x88);
- v1[4] = _mm512_shuffle_i32x4(v[0], v[4], 0xdd);
- v1[5] = _mm512_shuffle_i32x4(v[1], v[5], 0xdd);
- v1[6] = _mm512_shuffle_i32x4(v[2], v[6], 0xdd);
- v1[7] = _mm512_shuffle_i32x4(v[3], v[7], 0xdd);
- v1[8] = _mm512_shuffle_i32x4(v[8], v[12], 0x88);
- v1[9] = _mm512_shuffle_i32x4(v[9], v[13], 0x88);
- v1[10] = _mm512_shuffle_i32x4(v[10], v[14], 0x88);
- v1[11] = _mm512_shuffle_i32x4(v[11], v[15], 0x88);
- v1[12] = _mm512_shuffle_i32x4(v[8], v[12], 0xdd);
- v1[13] = _mm512_shuffle_i32x4(v[9], v[13], 0xdd);
- v1[14] = _mm512_shuffle_i32x4(v[10], v[14], 0xdd);
- v1[15] = _mm512_shuffle_i32x4(v[11], v[15], 0xdd);
-
- v[0] = _mm512_shuffle_i32x4(v1[0], v1[8], 0x88);
- v[1] = _mm512_shuffle_i32x4(v1[1], v1[9], 0x88);
- v[2] = _mm512_shuffle_i32x4(v1[2], v1[10], 0x88);
- v[3] = _mm512_shuffle_i32x4(v1[3], v1[11], 0x88);
- v[4] = _mm512_shuffle_i32x4(v1[4], v1[12], 0x88);
- v[5] = _mm512_shuffle_i32x4(v1[5], v1[13], 0x88);
- v[6] = _mm512_shuffle_i32x4(v1[6], v1[14], 0x88);
- v[7] = _mm512_shuffle_i32x4(v1[7], v1[15], 0x88);
- v[8] = _mm512_shuffle_i32x4(v1[0], v1[8], 0xdd);
- v[9] = _mm512_shuffle_i32x4(v1[1], v1[9], 0xdd);
- v[10] = _mm512_shuffle_i32x4(v1[2], v1[10], 0xdd);
- v[11] = _mm512_shuffle_i32x4(v1[3], v1[11], 0xdd);
- v[12] = _mm512_shuffle_i32x4(v1[4], v1[12], 0xdd);
- v[13] = _mm512_shuffle_i32x4(v1[5], v1[13], 0xdd);
- v[14] = _mm512_shuffle_i32x4(v1[6], v1[14], 0xdd);
- v[15] = _mm512_shuffle_i32x4(v1[7], v1[15], 0xdd);
-}
-
-void quantize_row_q8_K_vnni(const float * RESTRICT x, void * RESTRICT vy, int64_t k) {
- assert(k % QK_K == 0);
- const int KB = k / QK_K;
- constexpr int kVecs = QK_K / 16;
-
- block_q8_K * y = reinterpret_cast<block_q8_K *>(vy);
-
- // hold 16 float vecs from x
- __m512 v[kVecs];
-
- // hold the quants vecs
- __m512i vq[kVecs / 4];
-
- // hold the packed quants vecs
- __m512i vq_packed[kVecs / 4];
-
- const __m512 signBit = _mm512_set1_ps(-0.f);
-
- for (int i = 0; i < KB; ++i) {
- // Compute max(abs(e)) for the block
- __m512 vamax = _mm512_set1_ps(0.f);
- for (int j = 0; j < kVecs; ++j) {
- v[j] = _mm512_loadu_ps(x); x += 16;
- vamax = _mm512_max_ps(vamax, _mm512_andnot_ps(signBit, v[j]));
- }
- const float amax = _mm512_reduce_max_ps(vamax);
-
- // Quantize these floats
- const float iscale = 127.f / amax;
- y[i].d = GGML_FP32_TO_FP16(1 / iscale);
- const float id = ( amax != 0.0f ) ? iscale : 0.f;
- const __m512 vscale = _mm512_set1_ps(id);
-
- // Apply multiplier and round to nearest integer
- for (int j = 0; j < kVecs; ++j) {
- v[j] = _mm512_mul_ps(v[j], vscale);
- v[j] = _mm512_roundscale_ps(v[j], (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
- }
-
- // Pack to epi8 vecs
- for (int j = 0; j < kVecs / 4; ++j) {
- __m128i q8_0 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 0]));
- __m128i q8_1 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 1]));
- __m128i q8_2 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 2]));
- __m128i q8_3 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 3]));
-
- __m256i q8_01 = _mm256_insertf128_si256(_mm256_castsi128_si256(q8_0), (q8_1), 1);
- __m256i q8_23 = _mm256_insertf128_si256(_mm256_castsi128_si256(q8_2), (q8_3), 1);
-
- vq[j] = _mm512_inserti32x8(_mm512_castsi256_si512(q8_01), q8_23, 1);
- _mm512_storeu_si512((__m512i *)(y[i].qs + j * 64), vq[j]);
- }
-
- // Compute the bsums with vnni
- transpose_16x4_32bit(vq, vq_packed);
-
- const __m512i one = _mm512_set1_epi8(1);
- __m512i sum = _mm512_setzero_si512();
- for (int k = 0; k < 4; ++k) {
- sum = _mm512_dpbusd_epi32(sum, one, vq_packed[k]);
- }
- _mm256_storeu_si256((__m256i *)(y[i].bsums), _mm512_cvtepi32_epi16(sum));
- }
-}
-
-// quantize A from float to `vec_dot_type`
-template <typename T>
-inline void from_float(const float * x, char * vy, int64_t k);
-
-template <>
-inline void from_float<block_q8_0>(const float * x, char * vy, int64_t k) {
- // FIXME: using unoptimized reference impl until moved to CPU backend
- quantize_row_q8_0_ref(x, (block_q8_0 *)vy, k);
-}
-
-template <>
-inline void from_float<block_q8_1>(const float * x, char * vy, int64_t k) {
- quantize_row_q8_1_ref(x, (block_q8_1 *)vy, k);
-}
-
-template <>
-inline void from_float<block_q8_K>(const float * x, char * vy, int64_t k) {
-#if 1
- // TODO: this is reference impl!
- quantize_row_q8_K_ref(x, (block_q8_K *)vy, k);
-#else
- quantize_row_q8_K_vnni(x, vy, k);
-#endif
-}
-
-// load A from memory to array when nrows can not fill in whole tile
-void unpack_A(int8_t * RESTRICT tile, const block_q8_0 * RESTRICT A, int lda, int nr) {
- assert(nr != TILE_M);
- for (int m = 0; m < nr; ++m) {
- const __m256i v = _mm256_loadu_si256((const __m256i *)(A[m * lda].qs));
- _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), v);
- }
-}
-
-void unpack_A(int8_t * RESTRICT tile, const block_q8_1 * RESTRICT A, int lda, int nr) {
- assert(nr != TILE_M);
- for (int m = 0; m < nr; ++m) {
- const __m256i v = _mm256_loadu_si256((const __m256i *)(A[m * lda].qs));
- _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), v);
- }
-}
-
-template <typename TB>
-void unpack_A(int8_t * RESTRICT tile, const block_q8_K * RESTRICT A, int lda, int k, int nr) {
- assert(nr <= TILE_M);
- for (int m = 0; m < nr; ++m) {
- const __m256i v = _mm256_loadu_si256((const __m256i *)(A[m * lda].qs + k * 32));
- _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), v);
- }
-}
-
-template <>
-void unpack_A<block_q6_K>(int8_t * RESTRICT tile, const block_q8_K * RESTRICT A, int lda, int k, int nr) {
- assert(nr <= TILE_M);
- // zero padding k from 16 to 32, so that we don't have to re-config amx
- const __m128i zero = _mm_setzero_si128();
- for (int m = 0; m < nr; ++m) {
- const __m128i v = _mm_loadu_si128((const __m128i *)(A[m * lda].qs + k * 16));
- const __m256i r = _mm256_insertf128_si256(_mm256_castsi128_si256(v), zero, 1);
- _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), r);
- }
-}
-
-#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
-inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) {
- const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
- const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
- const __m256i lowMask = _mm256_set1_epi8(0xF);
- return _mm256_and_si256(lowMask, bytes);
-}
-
-// used for block_q4_K
-inline __m512i bytes_from_nibbles_64(const uint8_t * rsi) {
- const __m256i tmp = _mm256_loadu_si256((const __m256i *)rsi);
- const __m256i lowMask = _mm256_set1_epi8(0xF);
- const __m256i q4l = _mm256_and_si256(tmp, lowMask);
- const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(tmp, 4), lowMask);
- return _mm512_inserti32x8(_mm512_castsi256_si512(q4l), q4h, 1);
-}
-
-// used for block_q5_K
-inline __m512i bytes_from_nibbles_64(const uint8_t * qs, const uint8_t * qh, int k) {
- const __m256i lowMask = _mm256_set1_epi8(0xF);
- __m256i hmask = _mm256_set1_epi8(1);
- hmask = _mm256_slli_epi16(hmask, k);
-
- const __m256i q5bits = _mm256_loadu_si256((const __m256i *)qs);
- const __m256i hbits = _mm256_loadu_si256((const __m256i *)qh);
-
- const __m256i q5l_0 = _mm256_and_si256(q5bits, lowMask);
- const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), k + 0), 4);
- const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0);
- hmask = _mm256_slli_epi16(hmask, 1);
-
- const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), lowMask);
- const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), k + 1), 4);
- const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1);
-
- return _mm512_inserti32x8(_mm512_castsi256_si512(q5_0), q5_1, 1);
-}
-
-// used for block_q6_K
-inline void bytes_from_nibbles_128(__m512i& r0, __m512i& r1, const uint8_t * qs, const uint8_t * qh) {
- const __m256i m4 = _mm256_set1_epi8(0xF);
- const __m256i m2 = _mm256_set1_epi8(0x3);
-
- const __m256i q6bits1 = _mm256_loadu_si256((const __m256i *)qs);
- const __m256i q6bits2 = _mm256_loadu_si256((const __m256i *)(qs + 32));
- const __m256i q6bitsH = _mm256_loadu_si256((const __m256i *)qh);
-
- const __m256i q6h_0 = _mm256_slli_epi16(_mm256_and_si256( q6bitsH, m2), 4);
- const __m256i q6h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q6bitsH, 2), m2), 4);
- const __m256i q6h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q6bitsH, 4), m2), 4);
- const __m256i q6h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q6bitsH, 6), m2), 4);
-
- const __m256i q6_0 = _mm256_or_si256(_mm256_and_si256(q6bits1, m4), q6h_0);
- const __m256i q6_1 = _mm256_or_si256(_mm256_and_si256(q6bits2, m4), q6h_1);
- const __m256i q6_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q6bits1, 4), m4), q6h_2);
- const __m256i q6_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q6bits2, 4), m4), q6h_3);
-
- r0 = _mm512_inserti32x8(_mm512_castsi256_si512(q6_0), q6_1, 1);
- r1 = _mm512_inserti32x8(_mm512_castsi256_si512(q6_2), q6_3, 1);
-}
-
-inline __m512i packNibbles(__m512i r0, __m512i r1) {
- return _mm512_or_si512(r0, _mm512_slli_epi16(r1, 4));
-}
-
-template <typename TB>
-inline void pack_qs(void * RESTRICT packed_B, const TB * RESTRICT B, int KB) {
- int8_t tmp[8 * 64];
- __m256i v[8], v2[8];
- for (int n = 0; n < 8; ++n) {
- v[n] = bytes_from_nibbles_32(B[n * KB].qs);
- }
- transpose_8x8_32bit(v, v2);
- for (int n = 0; n < 8; ++n) {
- _mm256_storeu_si256((__m256i *)(tmp + n * 64), v2[n]);
- }
- for (int n = 0; n < 8; ++n) {
- v[n] = bytes_from_nibbles_32(B[(n + 8) * KB].qs);
- }
- transpose_8x8_32bit(v, v2);
- for (int n = 0; n < 8; ++n) {
- _mm256_storeu_si256((__m256i *)(tmp + n * 64 + 32), v2[n]);
- }
-
- // pack again with 128 to fully utilize vector length
- for (int n = 0; n < 8; n += 2) {
- __m512i r0 = _mm512_loadu_si512((const __m512i *)(tmp + n * 64));
- __m512i r1 = _mm512_loadu_si512((const __m512i *)(tmp + n * 64 + 64));
- __m512i r1r0 = packNibbles(r0, r1);
- _mm512_storeu_si512((__m512i *)((char *)packed_B + n * 32), r1r0);
- }
-}
-
-template <>
-inline void pack_qs<block_q8_0>(void * RESTRICT packed_B, const block_q8_0 * RESTRICT B, int KB) {
- __m256i v[8], v2[8];
- for (int n = 0; n < 8; ++n) {
- v[n] = _mm256_loadu_si256((const __m256i *)(B[n * KB].qs));
- }
- transpose_8x8_32bit(v, v2);
- for (int n = 0; n < 8; ++n) {
- _mm256_storeu_si256((__m256i *)((char *)packed_B + n * 64), v2[n]);
- }
- for (int n = 0; n < 8; ++n) {
- v[n] = _mm256_loadu_si256((const __m256i *)(B[(n + 8) * KB].qs));
- }
- transpose_8x8_32bit(v, v2);
- for (int n = 0; n < 8; ++n) {
- _mm256_storeu_si256((__m256i *)((char *)packed_B + n * 64 + 32), v2[n]);
- }
-}
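
The transpose_8x8_32bit step is what produces the VNNI layout: viewed as 32-bit lanes, a {TILE_N, TILE_K} int8 tile becomes {TILE_K/4, TILE_N, 4}, so each dot-product instruction consumes four consecutive K values per column. A scalar reference of that re-layout for plain int8 data, as in the q8_0 specialization above (a sketch only; the 4-bit variants additionally re-pack the result back into nibbles with packNibbles):

    #include <cstdint>

    // B: tile_n x tile_k int8, row-major; packed: {tile_k/4, tile_n, 4}
    void pack_vnni_ref(int8_t * packed, const int8_t * B, int tile_n, int tile_k) {
        for (int k = 0; k < tile_k; k += 4) {
            for (int n = 0; n < tile_n; ++n) {
                for (int kk = 0; kk < 4; ++kk) {
                    packed[(k / 4) * tile_n * 4 + n * 4 + kk] = B[n * tile_k + k + kk];
                }
            }
        }
    }
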
-
-template <>
-inline void pack_qs<block_q4_K>(void * RESTRICT packed_B, const block_q4_K * RESTRICT B, int KB) {
- __m512i v[16];
- // QK_K 256 with 8 groups, handle 2 groups at a time
- char * pb = (char *)packed_B;
- for (int k = 0; k < QK_K / 64; ++k) {
- // pack 2 groups { n, g, k} to {g, k/4, 4n}
- // e.g. {16, 2, 32} to {2, 8, 64}
- for (int n = 0; n < TILE_N; ++n) {
- v[n] = bytes_from_nibbles_64(B[n * KB].qs + k * 32);
- }
-
- transpose_16x16_32bit(v);
-
- // pack again with 128 to fully utilize vector length
- for (int n = 0; n < TILE_N; n += 2) {
- _mm512_storeu_si512((__m512i *)pb, packNibbles(v[n], v[n + 1]));
- pb += 64;
- }
- }
-}
-
-template <>
-inline void pack_qs<block_q5_K>(void * RESTRICT packed_B, const block_q5_K * RESTRICT B, int KB) {
- __m512i v[16];
- const __m512i lowMask = _mm512_set1_epi8(0xF);
- // QK_K 256 with 8 groups, handle 2 groups at a time
- char * pb = (char *)packed_B;
- char * ph = (char *)packed_B + (QK_K / 2) * TILE_N;
- for (int k = 0; k < QK_K / 64; ++k) {
- // pack 2 groups { n, g, k} to {g, k/4, 4n}
- // e.g. {16, 2, 32} to {2, 8, 64}
- for (int n = 0; n < TILE_N; ++n) {
- v[n] = bytes_from_nibbles_64(B[n * KB].qs + k * 32, B[n * KB].qh, /* group */2 * k);
- }
-
- transpose_16x16_32bit(v);
-
- // 1. pack lower 4bits with 2 groups
- for (int n = 0; n < TILE_N; n += 2) {
- // get lower 4 bits
- const __m512i r0 = _mm512_and_si512(v[n], lowMask);
- const __m512i r1 = _mm512_and_si512(v[n + 1], lowMask);
- _mm512_storeu_si512((__m512i *)pb, packNibbles(r0, r1)); pb += 64;
- }
-
- // 2. pack higher 1bit with 2 groups
- const __m512i hmask = _mm512_set1_epi8(0x10);
- for (int g = 0; g < 2; ++g) {
- __m512i hbits = _mm512_setzero_si512();
- hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 0], hmask), 4));
- hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 1], hmask), 3));
- hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 2], hmask), 2));
- hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 3], hmask), 1));
- hbits = _mm512_add_epi8(hbits, _mm512_and_si512(v[g * 8 + 4], hmask) );
- hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 8 + 5], hmask), 1));
- hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 8 + 6], hmask), 2));
- hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 8 + 7], hmask), 3));
- _mm512_storeu_si512((__m512i *)ph, hbits); ph += 64;
- }
- }
-}
-
-template <>
-inline void pack_qs<block_q6_K>(void * RESTRICT packed_B, const block_q6_K * RESTRICT B, int KB) {
- __m512i v[32];
- const __m512i lowMask = _mm512_set1_epi8(0xF);
- // QK_K 256 with 8 groups, handle 4 groups at a time
- char * pb = (char *)packed_B;
- char * ph = (char *)packed_B + (QK_K / 2) * TILE_N;
- for (int k = 0; k < QK_K / 128; ++k) {
- for (int n = 0; n < TILE_N; ++n) {
- bytes_from_nibbles_128(v[n], v[n + 16], B[n * KB].ql + k * 64, B[n * KB].qh + k * 32);
- }
-
- // top half: group 0,1 or 4,5; bottom half: group 2,3 or 6,7
- transpose_16x16_32bit(v);
- transpose_16x16_32bit(v + 16);
-
- // 1. pack lower 4bits with 4 groups
- for (int n = 0; n < 32; n += 2) {
- const __m512i r0 = _mm512_and_si512(v[n], lowMask);
- const __m512i r1 = _mm512_and_si512(v[n + 1], lowMask);
- _mm512_storeu_si512((__m512i *)pb, packNibbles(r0, r1)); pb += 64;
- }
-
- // 2. pack higher 2bit with 4 groups
- const __m512i hmask = _mm512_set1_epi8(0x30);
- for (int g = 0; g < 8; ++g) {
- __m512i hbits = _mm512_setzero_si512();
- hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 4 + 0], hmask), 4));
- hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 4 + 1], hmask), 2));
- hbits = _mm512_add_epi8(hbits, _mm512_and_si512(v[g * 4 + 2], hmask) );
- hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 4 + 3], hmask), 2));
- _mm512_storeu_si512((__m512i *)ph, hbits); ph += 64;
- }
- }
-}
-
-template <>
-inline void pack_qs<block_iq4_xs>(void * RESTRICT packed_B, const block_iq4_xs * RESTRICT B, int KB) {
- __m512i v[16];
- char * pb = (char *)packed_B;
- for (int k = 0; k < QK_K / 64; ++k) {
- for (int n = 0; n < TILE_N; ++n) {
- __m256i r0 = bytes_from_nibbles_32(B[n * KB].qs + k * 32 + 0);
- __m256i r1 = bytes_from_nibbles_32(B[n * KB].qs + k * 32 + 16);
- v[n] = _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1);
- }
-
- transpose_16x16_32bit(v);
-
- // pack again with 128 to fully utilize vector length
- for (int n = 0; n < TILE_N; n += 2) {
- _mm512_storeu_si512((__m512i *)pb, packNibbles(v[n], v[n + 1]));
- pb += 64;
- }
- }
-}
-
-// pack B to vnni formats in 4bits or 8 bits
-void pack_B(void * RESTRICT packed_B, const block_q4_0 * RESTRICT B, int KB) {
- pack_qs(packed_B, B, KB);
- ggml_half * d0 = reinterpret_cast<ggml_half *>((char *)packed_B + TILE_N * TILE_K / 2);
- for (int n = 0; n < TILE_N; ++n) {
- d0[n] = B[n * KB].d;
- }
-}
-
-void pack_B(void * RESTRICT packed_B, const block_q4_1 * RESTRICT B, int KB) {
- pack_qs(packed_B, B, KB);
- ggml_half * d0 = reinterpret_cast<ggml_half *>((char *)packed_B + TILE_N * TILE_K / 2);
- ggml_half * m0 = d0 + TILE_N;
- for (int n = 0; n < TILE_N; ++n) {
- d0[n] = B[n * KB].d;
- m0[n] = B[n * KB].m;
- }
-}
-
-inline void s8s8_compensation(void * RESTRICT packed_B) {
-    // packed_B layout:
-    //   quants {TILE_N, TILE_K} int8_t
-    //   d0     {TILE_N} ggml_half
-    //   comp   {TILE_N} int32_t
- const int offset = TILE_N * TILE_K + TILE_N * sizeof(ggml_half);
- __m512i vcomp = _mm512_setzero_si512();
- const __m512i off = _mm512_set1_epi8(static_cast<char>(0x80));
- for (int k = 0; k < 8; ++k) {
- __m512i vb = _mm512_loadu_si512((const __m512i *)((const char *)packed_B + k * 64));
- vcomp = _mm512_dpbusd_epi32(vcomp, off, vb);
- }
- _mm512_storeu_si512((__m512i *)((char *)(packed_B) + offset), vcomp);
-}
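
s8s8_compensation stores, per output column, the term sum_k 128 * B[k] right behind the quants and scales. It exists because AVX512-VNNI only provides an unsigned-by-signed dot product, so the signed activations are biased by 128 and the bias is removed again with this pre-computed term. A scalar sketch of the identity (illustrative; the real code keeps one compensation value per column of the packed tile):

    #include <cstdint>

    // a, b are signed int8; dpbusd can only do u8 x s8, so use
    // sum a*b = sum (a + 128)*b - sum 128*b
    int32_t dot_s8s8_via_u8s8_ref(const int8_t * a, const int8_t * b, int n) {
        int32_t acc  = 0;   // what _mm512_dpbusd_epi32 accumulates
        int32_t comp = 0;   // what s8s8_compensation pre-computes from B
        for (int k = 0; k < n; ++k) {
            acc  += (int32_t)(uint8_t)(a[k] + 128) * b[k];
            comp += 128 * (int32_t)b[k];
        }
        return acc - comp;
    }
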
-
-void pack_B(void * RESTRICT packed_B, const block_q8_0 * RESTRICT B, int KB) {
- pack_qs(packed_B, B, KB);
- ggml_half * d0 = reinterpret_cast<ggml_half *>((char *)packed_B + TILE_N * TILE_K);
- for (int n = 0; n < TILE_N; ++n) {
- d0[n] = B[n * KB].d;
- }
- s8s8_compensation(packed_B);
-}
-
-// convert 8 * {min, scale} from int6 to int8
-inline void unpack_mins_and_scales(const uint8_t * scales, uint32_t * utmp) {
- const uint32_t kmask1 = 0x3f3f3f3f;
- const uint32_t kmask2 = 0x0f0f0f0f;
- const uint32_t kmask3 = 0x03030303;
-
- memcpy(utmp, scales, 12);
- utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
- const uint32_t uaux = utmp[1] & kmask1;
- utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
- utmp[2] = uaux;
- utmp[0] &= kmask1;
-}
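
unpack_mins_and_scales widens the 12-byte scales field of the K-quant super-blocks, which holds eight 6-bit scales and eight 6-bit mins. The masks above reproduce the usual per-index extraction; a per-index scalar sketch of that packing, in the spirit of the get_scale_min_k4 helper used elsewhere in ggml (assumes the standard K-quant layout):

    #include <cstdint>

    // j in [0, 8): return scale j and min j from the 12-byte packed field q[]
    static inline void get_scale_min_ref(int j, const uint8_t * q, uint8_t & sc, uint8_t & mn) {
        if (j < 4) {
            sc = q[j]     & 63;
            mn = q[j + 4] & 63;
        } else {
            sc = (q[j + 4] & 0x0F) | ((q[j - 4] >> 6) << 4);
            mn = (q[j + 4] >>  4)  | ((q[j    ] >> 6) << 4);
        }
    }
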
-
-// packed_B layout:
-// quants {8, TILE_N, 16} uint8
-// scales {8, TILE_N} uint8
-// mins {8, TILE_N} uint8
-// d {TILE_N} ggml_half
-// dmin {TILE_N} ggml_half
-void pack_B(void * RESTRICT packed_B, const block_q4_K * RESTRICT B, int KB) {
- pack_qs(packed_B, B, KB);
-
- uint8_t * scales = reinterpret_cast<uint8_t *>((char *)packed_B + (QK_K / 2) * TILE_N);
- uint8_t * mins = scales + 8 * TILE_N;
- ggml_half * d = reinterpret_cast<ggml_half *>(mins + 8 * TILE_N);
- ggml_half * dmin = d + TILE_N;
-
- union {
- uint32_t u32[4];
- uint8_t u8[16];
- } s;
-
- for (int n = 0; n < TILE_N; ++n) {
- unpack_mins_and_scales(B[n * KB].scales, s.u32);
- for (int k = 0; k < 8; ++k) {
- scales[k * TILE_N + n] = s.u8[k];
- mins[(k >> 1) * TILE_N * 2 + n * 2 + (k & 0x1)] = s.u8[k + 8];
- }
- d[n] = B[n * KB].d;
- dmin[n] = B[n * KB].dmin;
- }
-}
-
-// packed_B layout:
-// quants {8, TILE_N, 16} uint8
-// qh {8, TILE_N, 4} uint8
-// scales {8, TILE_N} uint8
-// mins {8, TILE_N} uint8
-// d {TILE_N} ggml_half
-// dmin {TILE_N} ggml_half
-void pack_B(void * RESTRICT packed_B, const block_q5_K * RESTRICT B, int KB) {
- pack_qs(packed_B, B, KB);
-
- uint8_t * scales = reinterpret_cast<uint8_t *>((char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N);
- uint8_t * mins = scales + 8 * TILE_N;
- ggml_half * d = reinterpret_cast<ggml_half *>(mins + 8 * TILE_N);
- ggml_half * dmin = d + TILE_N;
-
- union {
- uint32_t u32[4];
- uint8_t u8[16];
- } s;
-
- for (int n = 0; n < TILE_N; ++n) {
- unpack_mins_and_scales(B[n * KB].scales, s.u32);
- for (int k = 0; k < 8; ++k) {
- scales[k * TILE_N + n] = s.u8[k];
- mins[(k >> 1) * TILE_N * 2 + n * 2 + (k & 0x1)] = s.u8[k + 8];
- }
- d[n] = B[n * KB].d;
- dmin[n] = B[n * KB].dmin;
- }
-}
-
-// packed_B layout:
-// quants {16, TILE_N, 8} uint8
-// qh {16, TILE_N, 4} uint8
-// scales {16, TILE_N} uint8
-// d {TILE_N} ggml_half
-void pack_B(void * RESTRICT packed_B, const block_q6_K * RESTRICT B, int KB) {
- pack_qs(packed_B, B, KB);
-
- uint8_t * scales = reinterpret_cast<uint8_t *>((char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N);
- ggml_half * d = reinterpret_cast<ggml_half *>(scales + 16 * TILE_N);
- for (int n = 0; n < TILE_N; ++n) {
- const int8_t * ps = B[n * KB].scales;
- for (int k = 0; k < 16; ++k) {
- scales[k * TILE_N + n] = ps[k];
- }
- d[n] = B[n * KB].d;
- }
-}
-
-// packed_B layout:
-// quants {8, TILE_N, 16} uint8
-// scales {8, TILE_N} int8
-// d {TILE_N} ggml_half
-void pack_B(void * RESTRICT packed_B, const block_iq4_xs * RESTRICT B, int KB) {
- pack_qs(packed_B, B, KB);
-
- int8_t * scales = reinterpret_cast<int8_t *>((char *)packed_B + (QK_K / 2) * TILE_N);
- ggml_half * d = reinterpret_cast<ggml_half *>(scales + 8 * TILE_N);
-
- // pack the scales
- for (int n = 0; n < TILE_N; ++n) {
- uint16_t sh = B[n * KB].scales_h;
- for (int k = 0; k < 8; k += 2) {
- const int16_t ls1 = ((B[n * KB].scales_l[k / 2] & 0xf) | ((sh << 4) & 0x30)) - 32;
- const int16_t ls2 = ((B[n * KB].scales_l[k / 2] >> 4) | ((sh << 2) & 0x30)) - 32;
- scales[(k + 0) * TILE_N + n] = ls1;
- scales[(k + 1) * TILE_N + n] = ls2;
- sh >>= 4;
- }
- d[n] = B[n * KB].d;
- }
-}
-
-template<typename TB, typename packed_B_t = packed_B_type<TB>>
-void unpack_B(packed_B_t * RESTRICT tile, const void * RESTRICT packed_B) {
- GGML_UNUSED(tile);
- GGML_UNUSED(packed_B);
-}
-
-template <>
-void unpack_B<block_q4_0>(int8_t * RESTRICT tile, const void * RESTRICT packed_B) {
- const __m512i off = _mm512_set1_epi8(8);
- const __m512i lowMask = _mm512_set1_epi8(0xF);
- for (int n = 0; n < 8; n += 2) {
- __m512i bytes = _mm512_loadu_si512((const __m512i *)((const char *)packed_B + n * 32));
- const __m512i r0 = _mm512_sub_epi8(_mm512_and_si512(bytes, lowMask), off);
- const __m512i r1 = _mm512_sub_epi8(_mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask), off);
- _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0);
- _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1);
- }
-}
-
-template <>
-void unpack_B<block_q4_1>(uint8_t * RESTRICT tile, const void * RESTRICT packed_B) {
- const __m512i lowMask = _mm512_set1_epi8(0xF);
- for (int n = 0; n < 8; n += 2) {
- __m512i bytes = _mm512_loadu_si512((const __m512i *)((const char *)packed_B + n * 32));
- const __m512i r0 = _mm512_and_si512(bytes, lowMask);
- const __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
- _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0);
- _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1);
- }
-}
-
-// packed_B_t for QKK is int8_t
-template <typename TB>
-void unpack_B(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) {
- const int packed_B_group_size = QK_K / 2 * TILE_N / 8;
- const char * packed_B_group = (const char *)packed_B + k * packed_B_group_size;
- const __m512i lowMask = _mm512_set1_epi8(0xF);
- for (int n = 0; n < 8; n += 2) {
- __m512i bytes = _mm512_loadu_si512(packed_B_group + n * 32);
- const __m512i r0 = _mm512_and_si512(bytes, lowMask);
- const __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
- _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0);
- _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1);
- }
-}
-
-template <>
-void unpack_B<block_q5_K>(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) {
- // lower 4bits, stride 256 bytes
- const int packed_l4_group_size = QK_K / 2 * TILE_N / 8;
- const char * pb = (const char *)packed_B + k * packed_l4_group_size;
-
- // higher 1bit, stride 64 bytes
- const int packed_h1_group_size = QK_K / 8 * TILE_N / 8;
- const char * ph = (const char *)packed_B + (QK_K / 2) * TILE_N + k * packed_h1_group_size;
- const __m512i hbits = _mm512_loadu_si512(ph);
-
- const __m512i lowMask = _mm512_set1_epi8(0xF);
- __m512i hmask0 = _mm512_set1_epi8(0x1);
- __m512i hmask1 = _mm512_set1_epi8(0x2);
-
- for (int n = 0; n < 8; n += 2) {
- __m512i bytes = _mm512_loadu_si512(pb + n * 32);
- __m512i r0 = _mm512_and_si512(bytes, lowMask);
- __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
- __m512i h0 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask0), n), 4);
- __m512i h1 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask1), n + 1), 4);
-
- hmask0 = _mm512_slli_epi16(hmask0, 2);
- hmask1 = _mm512_slli_epi16(hmask1, 2);
- r0 = _mm512_add_epi8(r0, h0);
- r1 = _mm512_add_epi8(r1, h1);
- _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0);
- _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1);
- }
-}
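
The q5_K unpack stitches each 5-bit quant back together: the low four bits come from the packed nibbles and the fifth bit from the qh bit-plane, selected by the two shifting hmask registers. Per value, the operation is simply (scalar sketch, illustrative name):

    #include <cstdint>

    // rebuild a 5-bit quant (0..31) from its low nibble and its qh bit
    static inline uint8_t q5_value_ref(uint8_t low_nibble, uint8_t high_bit) {
        return (uint8_t)((low_nibble & 0xF) | ((high_bit & 0x1) << 4));
    }
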
-
-template <>
-void unpack_B<block_q6_K>(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) {
- // lower 4bits, stride 128 bytes
- const int packed_l4_group_size = QK_K / 2 * TILE_N / 16;
- const char * pb = (const char *)packed_B + k * packed_l4_group_size;
-
- // higher 2bits, stride 64 bytes
- const int packed_h2_group_size = QK_K / 4 * TILE_N / 16;
- const char * ph = (const char *)packed_B + (QK_K / 2) * TILE_N + k * packed_h2_group_size;
- const __m512i hbits = _mm512_loadu_si512(ph);
-
- const __m512i off = _mm512_set1_epi8(32);
- const __m512i lowMask = _mm512_set1_epi8(0xF);
- __m512i hmask0 = _mm512_set1_epi8(0x3); // 0011
- __m512i hmask1 = _mm512_set1_epi8(0xC); // 1100
-
-    // note: zero padding for rows 4..7 is skipped here, since `unpack_A` has already applied it
- __m512i bytes = _mm512_loadu_si512(pb);
- __m512i r0 = _mm512_and_si512(bytes, lowMask);
- __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
- __m512i h0 = _mm512_slli_epi16(_mm512_and_si512(hbits, hmask0), 4);
- __m512i h1 = _mm512_slli_epi16(_mm512_and_si512(hbits, hmask1), 2);
- _mm512_storeu_si512((__m512i *)(tile + 0), _mm512_sub_epi8(_mm512_add_epi8(r0, h0), off));
- _mm512_storeu_si512((__m512i *)(tile + 64), _mm512_sub_epi8(_mm512_add_epi8(r1, h1), off));
-
- hmask0 = _mm512_slli_epi16(hmask0, 4);
- hmask1 = _mm512_slli_epi16(hmask1, 4);
-
- bytes = _mm512_loadu_si512(pb + 64);
- r0 = _mm512_and_si512(bytes, lowMask);
- r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
- h0 = _mm512_and_si512(hbits, hmask0);
- h1 = _mm512_srli_epi16(_mm512_and_si512(hbits, hmask1), 2);
- _mm512_storeu_si512((__m512i *)(tile + 128), _mm512_sub_epi8(_mm512_add_epi8(r0, h0), off));
- _mm512_storeu_si512((__m512i *)(tile + 192), _mm512_sub_epi8(_mm512_add_epi8(r1, h1), off));
-}
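
q6_K works the same way with a 2-bit high part, and the result is re-centered by subtracting 32 so the tile holds signed values. Per value (scalar sketch, illustrative name):

    #include <cstdint>

    // rebuild a signed 6-bit quant: low nibble | (2 high bits << 4), then center at 0
    static inline int8_t q6_value_ref(uint8_t low_nibble, uint8_t high_2bits) {
        return (int8_t)(((low_nibble & 0xF) | ((high_2bits & 0x3) << 4)) - 32);
    }
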
-
-template <>
-void unpack_B<block_iq4_xs>(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) {
- static const __m512i values128 = _mm512_set_epi8(
- 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
- 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
- 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
- 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127
- );
-
- const int packed_B_group_size = QK_K / 2 * TILE_N / 8;
- const char * pb = (const char *)packed_B + k * packed_B_group_size;
- const __m512i lowMask = _mm512_set1_epi8(0xF);
-
- for (int n = 0; n < 8; n += 2) {
- __m512i bytes = _mm512_loadu_si512(pb + n * 32);
- const __m512i r0 = _mm512_shuffle_epi8(values128, _mm512_and_si512(bytes, lowMask));
- const __m512i r1 = _mm512_shuffle_epi8(values128, _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask));
- _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0);
- _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1);
- }
-}
-
-template <typename TA, typename TB, bool is_acc>
-struct acc_C {};
-
-template <bool is_acc>
-struct acc_C<block_q8_0, block_q4_0, is_acc> {
- static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_0 * A, int lda, const void * packed_B, int nr) {
- const int offset = TILE_N * TILE_K / 2;
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset)));
-
- for (int m = 0; m < nr; ++m) {
- const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d));
- const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
-
- __m512 vsum;
- if (is_acc) {
- vsum = _mm512_loadu_ps(C + m * ldc);
- } else {
- vsum = _mm512_set1_ps(0.f);
- }
- vsum = _mm512_fmadd_ps(vtile, _mm512_mul_ps(vd0, vd1), vsum);
- _mm512_storeu_ps(C + m * ldc, vsum);
- }
- }
-};
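
acc_C is the epilogue that turns the int32 tile produced by AMX/VNNI into floats: every element is scaled by the product of the row's activation scale d1 and the column's weight scale d0, and either written or accumulated into C. A scalar sketch of the q8_0 x q4_0 case (names are illustrative; the real code reads d0 from the packed tile and d1 from the block headers):

    #include <cstdint>

    // tile: nr x tile_n int32 partial sums; d1 per row, d0 per column
    void acc_C_ref(float * C, int ldc, const int32_t * tile,
                   const float * d1, const float * d0,
                   int nr, int tile_n, bool is_acc) {
        for (int m = 0; m < nr; ++m) {
            for (int n = 0; n < tile_n; ++n) {
                const float v = (float)tile[m * tile_n + n] * d0[n] * d1[m];
                C[m * ldc + n] = is_acc ? C[m * ldc + n] + v : v;
            }
        }
    }
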
-
-template <bool is_acc>
-struct acc_C<block_q8_1, block_q4_1, is_acc> {
- static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_1 * A, int lda, const void * packed_B, int nr) {
- const int offset = TILE_N * TILE_K / 2;
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset)));
- const __m512 vm0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset + TILE_N * sizeof(ggml_half))));
-
- for (int m = 0; m < nr; ++m) {
- const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d));
- const __m512 vs1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].s));
- const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
-
- __m512 vsum;
- if (is_acc) {
- vsum = _mm512_loadu_ps(C + m * ldc);
- } else {
- vsum = _mm512_set1_ps(0.f);
- }
- vsum = _mm512_fmadd_ps(vtile, _mm512_mul_ps(vd0, vd1), vsum);
- vsum = _mm512_fmadd_ps(vm0, vs1, vsum);
- _mm512_storeu_ps(C + m * ldc, vsum);
- }
- }
-};
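
The q4_1 variant handles the asymmetric format x = d*q + m: besides the d0*d1 term it adds m0 * s1, where s1 is the activation sum already multiplied by its scale (pre-computed at quantization time and read above from the `s` field of block_q8_1). A scalar sketch of the identity (illustrative only):

    #include <cstdint>

    // x[k] = d0 * q4[k] + m0,  a[k] = d1 * q8[k]
    // => sum x[k]*a[k] = d0*d1 * sum q4[k]*q8[k] + m0 * (d1 * sum q8[k])
    float dot_q8_1_q4_1_ref(const int8_t * q8, const uint8_t * q4, int n,
                            float d1, float d0, float m0) {
        int32_t dot = 0, asum = 0;
        for (int k = 0; k < n; ++k) {
            dot  += (int32_t)q4[k] * q8[k];
            asum += q8[k];
        }
        const float s1 = d1 * (float)asum;   // corresponds to the pre-computed `s`
        return d0 * d1 * (float)dot + m0 * s1;
    }
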
-
-template <bool is_acc>
-struct acc_C<block_q8_0, block_q8_0, is_acc> {
- static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_0 * A, int lda, const void * packed_B, int nr) {
- const int offset = TILE_N * TILE_K;
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset)));
-
- for (int m = 0; m < nr; ++m) {
- const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d));
- const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
-
- __m512 vsum;
- if (is_acc) {
- vsum = _mm512_loadu_ps(C + m * ldc);
- } else {
- vsum = _mm512_set1_ps(0.f);
- }
- vsum = _mm512_fmadd_ps(vtile, _mm512_mul_ps(vd0, vd1), vsum);
- _mm512_storeu_ps(C + m * ldc, vsum);
- }
- }
-};
-
-template <bool is_acc>
-struct acc_C<block_q8_K, block_q4_K, is_acc> {
- static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_K * A, int lda, const void * packed_B, int nr) {
- const uint8_t * scales = reinterpret_cast<const uint8_t *>((const char *)packed_B + (QK_K / 2) * TILE_N);
- const uint8_t * mins = scales + 8 * TILE_N;
- const ggml_half * d0 = reinterpret_cast<const ggml_half *>(mins + 8 * TILE_N);
- const ggml_half * dmin = d0 + TILE_N;
-
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0));
- const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)dmin));
-
- for (int m = 0; m < nr; ++m) {
- const float d1 = A[m * lda].d;
- const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0);
- const __m512 vdm = _mm512_mul_ps(_mm512_set1_ps(-d1), vdmin);
- const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
-
- __m512 vsum;
- if (is_acc) {
- vsum = _mm512_loadu_ps(C + m * ldc);
- } else {
- vsum = _mm512_set1_ps(0.f);
- }
-
- const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[m * lda].bsums);
- const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
-
- __m512i acc_m = _mm512_setzero_si512();
- for (int k = 0; k < 4; ++k) {
- __m512i vmask = _mm512_set1_epi32(k);
- __m512i va = _mm512_permutexvar_epi32(vmask, _mm512_castsi128_si512(q8s));
- __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(mins + k * 32)));
- acc_m = _mm512_dpwssds_epi32(acc_m, va, vb);
- }
-
- vsum = _mm512_fmadd_ps(vtile, vd, vsum);
- vsum = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc_m), vdm, vsum);
- _mm512_storeu_ps(C + m * ldc, vsum);
- }
- }
-};
-
-template <bool is_acc>
-struct acc_C<block_q8_K, block_q5_K, is_acc> {
- static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_K * A, int lda, const void * packed_B, int nr) {
- const uint8_t * scales = reinterpret_cast<const uint8_t *>((const char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N);
- const uint8_t * mins = scales + 8 * TILE_N;
- const ggml_half * d0 = reinterpret_cast<const ggml_half *>(mins + 8 * TILE_N);
- const ggml_half * dmin = d0 + TILE_N;
-
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0));
- const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)dmin));
-
- for (int m = 0; m < nr; ++m) {
- const float d1 = A[m * lda].d;
- const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0);
- const __m512 vdm = _mm512_mul_ps(_mm512_set1_ps(-d1), vdmin);
- const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
-
- __m512 vsum;
- if (is_acc) {
- vsum = _mm512_loadu_ps(C + m * ldc);
- } else {
- vsum = _mm512_set1_ps(0.f);
- }
-
- const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[m * lda].bsums);
- const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
-
- __m512i acc_m = _mm512_setzero_si512();
- for (int k = 0; k < 4; ++k) {
- __m512i vmask = _mm512_set1_epi32(k);
- __m512i va = _mm512_permutexvar_epi32(vmask, _mm512_castsi128_si512(q8s));
- __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(mins + k * 32)));
- acc_m = _mm512_dpwssds_epi32(acc_m, va, vb);
- }
-
- vsum = _mm512_fmadd_ps(vtile, vd, vsum);
- vsum = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc_m), vdm, vsum);
- _mm512_storeu_ps(C + m * ldc, vsum);
- }
- }
-};
-
-template <bool is_acc>
-struct acc_C<block_q8_K, block_q6_K, is_acc> {
- static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_K * A, int lda, const void * packed_B, int nr) {
- const uint8_t * scales = reinterpret_cast<const uint8_t *>((const char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N);
- const ggml_half * d0 = reinterpret_cast<const ggml_half *>(scales + 16 * TILE_N);
-
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0));
-
- for (int m = 0; m < nr; ++m) {
- const float d1 = A[m * lda].d;
- const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0);
- const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
-
- __m512 vsum;
- if (is_acc) {
- vsum = _mm512_loadu_ps(C + m * ldc);
- } else {
- vsum = _mm512_set1_ps(0.f);
- }
-
- vsum = _mm512_fmadd_ps(vtile, vd, vsum);
- _mm512_storeu_ps(C + m * ldc, vsum);
- }
- }
-};
-
-template <bool is_acc>
-struct acc_C<block_q8_K, block_iq4_xs, is_acc> {
- static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_K * A, int lda, const void * packed_B, int nr) {
- const int8_t * scales = reinterpret_cast<const int8_t *>((const char *)packed_B + (QK_K / 2) * TILE_N);
- const ggml_half * d0 = reinterpret_cast<const ggml_half *>(scales + 8 * TILE_N);
-
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0));
-
- for (int m = 0; m < nr; ++m) {
- const float d1 = A[m * lda].d;
- const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0);
- const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
-
- __m512 vsum;
- if (is_acc) {
- vsum = _mm512_loadu_ps(C + m * ldc);
- } else {
- vsum = _mm512_set1_ps(0.f);
- }
-
- vsum = _mm512_fmadd_ps(vtile, vd, vsum);
- _mm512_storeu_ps(C + m * ldc, vsum);
- }
- }
-};
-
-template <typename TB> constexpr int get_quants_size();
-template <> constexpr int get_quants_size<block_q4_K>() { return (QK_K / 2) * TILE_N; }
-template <> constexpr int get_quants_size<block_q5_K>() { return (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N; }
-template <> constexpr int get_quants_size<block_q6_K>() { return (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N; }
-template <> constexpr int get_quants_size<block_iq4_xs>() { return (QK_K / 2) * TILE_N; }
-
-// used for QKK format
-template <typename TB, bool is_acc,
- typename std::enable_if<is_type_qkk<TB>::value, int>::type = 0>
-inline void scale_C(const int32_t * RESTRICT tile, int32_t * RESTRICT sumi, const void * packed_B, int k, int nr) {
- const uint8_t * scales = reinterpret_cast<const uint8_t *>((const char *)packed_B + get_quants_size<TB>());
- const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(scales + k * TILE_N)));
-
- for (int m = 0; m < nr; ++m) {
- __m512i vsumi;
- if (is_acc) {
- vsumi = _mm512_loadu_si512(sumi + m * TILE_N);
- } else {
- vsumi = _mm512_setzero_si512();
- }
- __m512i vtile = _mm512_loadu_si512(tile + m * TILE_N);
- vsumi = _mm512_add_epi32(vsumi, _mm512_mullo_epi32(vtile, vscale));
- _mm512_storeu_si512((__m512i *)(sumi + m * TILE_N), vsumi);
- }
-}
-
-template <typename TA, typename TB, typename TC, int BLOCK_M, int BLOCK_N, int BLOCK_K>
-struct tinygemm_kernel_avx {
- static void apply(int K, const TA * RESTRICT A, const TB * RESTRICT B, TC * RESTRICT C, int ldc) {
- GGML_UNUSED(K);
- GGML_UNUSED(A);
- GGML_UNUSED(B);
- GGML_UNUSED(C);
- GGML_UNUSED(ldc);
- }
-};
-
-template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
-struct tinygemm_kernel_avx<float, ggml_fp16_t, float, BLOCK_M, BLOCK_N, BLOCK_K> {
- static void apply(int K, const float * RESTRICT A, const ggml_fp16_t * RESTRICT B, float * RESTRICT C, int ldc) {
- constexpr int ROWS = BLOCK_M;
- constexpr int COLS = BLOCK_N;
- assert(BLOCK_K == 16);
-
- __m512 va;
- __m512 vb[COLS];
- __m512 vc[ROWS * COLS];
-
- auto loadc = [&](int idx) {
- vc[idx] = _mm512_setzero_ps();
- };
- Unroll<ROWS * COLS>{}(loadc);
-
- auto compute = [&](int idx, int k) {
-            // TODO: use `constexpr` here to get rid of integer div
- // when upgraded to C++17
- const int row = idx / COLS;
- const int col = idx % COLS;
-
- if (col == 0) {
- va = _mm512_loadu_ps(A + row * K + k);
- }
- if (row == 0) {
- vb[col] = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(B + col * K + k)));
- }
- vc[idx] = _mm512_fmadd_ps(va, vb[col], vc[idx]);
- };
-
- for (int k = 0; k < K; k += 16) {
- Unroll<ROWS * COLS>{}(compute, k);
- }
-
- auto storec = [&](int idx) {
- const int row = idx / COLS;
- const int col = idx % COLS;
- C[row * ldc + col] = _mm512_reduce_add_ps(vc[idx]);
- };
- Unroll<ROWS * COLS>{}(storec);
- }
-};
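
This fp32/fp16 fallback kernel is a small register-blocked GEMM: it keeps a ROWS x COLS grid of accumulators live, broadcasts one A vector per row and one converted B vector per column, and unrolls the grid with Unroll<>. Stripped of the vectorization, the loop structure looks like this (sketch; B is shown as float for brevity):

    // C = A * B^T for one register block; A is ROWS x K, B is COLS x K, both row-major
    template <int ROWS, int COLS>
    void tinygemm_ref(int K, const float * A, const float * B, float * C, int ldc) {
        float acc[ROWS][COLS] = {};
        for (int k = 0; k < K; ++k) {
            for (int r = 0; r < ROWS; ++r) {
                for (int c = 0; c < COLS; ++c) {
                    acc[r][c] += A[r * K + k] * B[c * K + k];
                }
            }
        }
        for (int r = 0; r < ROWS; ++r) {
            for (int c = 0; c < COLS; ++c) {
                C[r * ldc + c] = acc[r][c];
            }
        }
    }
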
-
-#define LAUNCH_TINYGEMM_KERNEL_AVX(MB_SIZE, NB_SIZE) \
- tinygemm_kernel_avx<float, type, float, MB_SIZE, NB_SIZE, blck_size>::apply( \
- K, (const float *)src1->data + mb_start * K, \
- (const type *)src0->data + nb_start * K, \
- (float *)dst->data + mb_start * ldc + nb_start, ldc);
-
-
-// re-organize in the format {NB, KB, TILE_SIZE}:
-#define PACKED_INDEX(n, k, KB, tile_size) (n * KB + k) * tile_size
-
-template<typename TB, int BLOCK_K>
-void convert_B_packed_format(void * RESTRICT packed_B, const TB * RESTRICT B, int N, int K, int n_threads) {
- const int NB = N / TILE_N;
- const int KB = K / BLOCK_K;
- const int TILE_SIZE = get_tile_size<TB>();
-
- // parallel on NB should be enough
- parallel_for(n_threads, NB, [&](int begin, int end) {
- for (int n = begin; n < end; ++n) {
- for (int k = 0; k < KB; ++k) {
- int n0 = n * TILE_N;
- pack_B((char *)packed_B + PACKED_INDEX(n, k, KB, TILE_SIZE), &B[n0 * KB + k], KB);
- }
- }
- });
-}
-
-template <typename TA, typename TB, typename TC, int BLOCK_M, int BLOCK_N, int BLOCK_K>
-struct tinygemm_kernel_vnni {};
-
-template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
-struct tinygemm_kernel_vnni<block_q8_0, block_q4_0, float, BLOCK_M, BLOCK_N, BLOCK_K> {
- static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
-
- constexpr int COLS = BLOCK_N / 16;
- const int TILE_SIZE = TILE_N * sizeof(block_q4_0);
-
- const block_q8_0 * RESTRICT A = static_cast<const block_q8_0 *>(_A);
- const char * RESTRICT B = static_cast<const char *>(_B);
-
- __m512i va[8];
- __m512 vc[COLS];
- __m512 vd1;
-
- // sum of offsets, shared across COLS
- //
- // avx512-vnni does not have `_mm512_dpbssd_epi32`,
-        // need to transform ss to us:
-        //   a * (b - 8) is equivalent to b * a - 8 * a
-        //   s    u   u                   u   s   u   s
- //
- __m512i vcomp;
-
- const __m512i off = _mm512_set1_epi8(8);
- const __m512i lowMask = _mm512_set1_epi8(0xF);
-
- auto loadc = [&](int col) {
- vc[col] = _mm512_setzero_ps();
- };
- Unroll<COLS>{}(loadc);
-
- auto compute = [&](int col, int i) {
- // load a and compute compensation
- if (col == 0) {
- const int32_t * a_ptr = reinterpret_cast<const int32_t *>(A[0 * KB + i].qs);
- vcomp = _mm512_setzero_si512();
- for (int k = 0; k < 8; ++k) {
- va[k] = _mm512_set1_epi32(a_ptr[k]);
- vcomp = _mm512_dpbusd_epi32(vcomp, off, va[k]);
- }
- vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].d));
- }
-
- // load b
- __m512i vsum = _mm512_setzero_si512();
- const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
- for (int k = 0; k < 8; k += 2) {
- __m512i bytes = _mm512_loadu_si512((const __m512i *)(b_ptr + k * 32));
- __m512i vb0 = _mm512_and_si512(bytes, lowMask);
- vsum = _mm512_dpbusd_epi32(vsum, vb0, va[k + 0]);
- __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
- vsum = _mm512_dpbusd_epi32(vsum, vb1, va[k + 1]);
- }
- const int offset = TILE_N * TILE_K / 2;
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset)));
- vsum = _mm512_sub_epi32(vsum, vcomp);
-
- vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(vsum), _mm512_mul_ps(vd0, vd1), vc[col]);
- };
-
- for (int i = 0; i < KB; ++i) {
- Unroll<COLS>{}(compute, i);
- }
-
- //store to C
- auto storec = [&](int col) {
- _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
- };
- Unroll<COLS>{}(storec);
- }
-};
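
The comment block above is the key trick for all of the 4-bit VNNI kernels: the q4_0 nibble b (0..15) represents b - 8, and since _mm512_dpbusd_epi32 requires its unsigned operand on the B side, the kernel computes b*a and subtracts the shared 8 * sum(a) term (vcomp) once per K block. A scalar sketch of the same identity (illustrative only):

    #include <cstdint>

    // b_nibbles holds the raw unsigned q4_0 nibbles; the stored value is b - 8
    int32_t dot_q8_q4_ref(const int8_t * a, const uint8_t * b_nibbles, int n) {
        int32_t acc  = 0;   // u8 x s8 dot product, as dpbusd computes it
        int32_t comp = 0;   // 8 * sum(a), shared across every output column
        for (int k = 0; k < n; ++k) {
            acc  += (int32_t)b_nibbles[k] * a[k];
            comp += 8 * (int32_t)a[k];
        }
        return acc - comp;  // equals sum (b - 8) * a
    }
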
-
-template <int BLOCK_N, int BLOCK_K>
-struct tinygemm_kernel_vnni<block_q8_1, block_q4_1, float, 1, BLOCK_N, BLOCK_K> {
- static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
-
- constexpr int COLS = BLOCK_N / 16;
- const int TILE_SIZE = TILE_N * sizeof(block_q4_1);
-
- const block_q8_1 * RESTRICT A = static_cast<const block_q8_1 *>(_A);
- const char * RESTRICT B = static_cast<const char *>(_B);
-
- __m512i va[8];
- __m512i vb[8];
- __m512 vc[COLS];
- __m512 vd1, vs1;
-
- const __m512i lowMask = _mm512_set1_epi8(0xF);
-
- auto loadc = [&](int col) {
- vc[col] = _mm512_setzero_ps();
- };
- Unroll<COLS>{}(loadc);
-
- auto compute = [&](int col, int i) {
- // load a
- if (col == 0) {
- const int32_t * a_ptr = reinterpret_cast<const int32_t *>(A[0 * KB + i].qs);
- for (int k = 0; k < 8; ++k) {
- va[k] = _mm512_set1_epi32(a_ptr[k]);
- }
- vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].d));
- vs1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].s));
- }
-
- // load b
- const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
- for (int k = 0; k < 8; k += 2) {
- __m512i bytes = _mm512_loadu_si512((const __m512i *)(b_ptr + k * 32));
- vb[k + 0] = _mm512_and_si512(bytes, lowMask);
- vb[k + 1] = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
- }
- const int offset = TILE_N * TILE_K / 2;
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset)));
- const __m512 vm0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset + TILE_N * sizeof(ggml_half))));
-
- __m512i vsum = _mm512_setzero_si512();
- for (int k = 0; k < 8; ++k) {
- vsum = _mm512_dpbusd_epi32(vsum, vb[k], va[k]);
- }
-
- vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(vsum), _mm512_mul_ps(vd0, vd1), vc[col]);
- vc[col] = _mm512_fmadd_ps(vm0, vs1, vc[col]);
- };
-
- for (int i = 0; i < KB; ++i) {
- Unroll<COLS>{}(compute, i);
- }
-
- //store to C
- auto storec = [&](int col) {
- _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
- };
- Unroll<COLS>{}(storec);
- }
-};
-
-template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
-struct tinygemm_kernel_vnni<block_q8_0, block_q8_0, float, BLOCK_M, BLOCK_N, BLOCK_K> {
- static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
-
- constexpr int COLS = BLOCK_N / 16;
- const int TILE_SIZE = TILE_N * sizeof(block_q8_0) + TILE_N * sizeof(int32_t);
-
- const block_q8_0 * RESTRICT A = static_cast<const block_q8_0 *>(_A);
- const char * RESTRICT B = static_cast<const char *>(_B);
-
- __m512i va[8];
- __m512i vb[8];
- __m512 vc[COLS];
- __m512 vd1;
-
- // Notes: s8s8 igemm compensation in avx512-vnni
-        // change s8s8 to u8s8 with a compensation term
- // a * b = (a + 128) * b - 128 * b
- // s s u s u s
- //
- // (128 * b is pre-computed when packing B to vnni formats)
- //
- const __m512i off = _mm512_set1_epi8(static_cast<char>(0x80));
-
- auto loadc = [&](int col) {
- vc[col] = _mm512_setzero_ps();
- };
- Unroll<COLS>{}(loadc);
-
- auto compute = [&](int col, int i) {
- // load a and add offset 128
- if (col == 0) {
- const int32_t * a_ptr = reinterpret_cast<const int32_t *>(A[0 * KB + i].qs);
- for (int k = 0; k < 8; ++k) {
- va[k] = _mm512_set1_epi32(a_ptr[k]);
- va[k] = _mm512_add_epi8(va[k], off);
- }
- vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].d));
- }
-
- // load b
- const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
- for (int k = 0; k < 8; ++k) {
- vb[k] = _mm512_loadu_si512((const __m512i *)(b_ptr + k * 64));
- }
- const int offset = TILE_N * TILE_K;
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset)));
- const int offset2 = TILE_N * TILE_K + TILE_N * sizeof(ggml_half);
- const __m512i vcomp = _mm512_loadu_si512((const __m512i *)(b_ptr + offset2));
-
- __m512i vsum = _mm512_setzero_si512();
- for (int k = 0; k < 8; ++k) {
- vsum = _mm512_dpbusd_epi32(vsum, va[k], vb[k]);
- }
- vsum = _mm512_sub_epi32(vsum, vcomp);
-
- vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(vsum), _mm512_mul_ps(vd0, vd1), vc[col]);
- };
-
- for (int i = 0; i < KB; ++i) {
- Unroll<COLS>{}(compute, i);
- }
-
- //store to C
- auto storec = [&](int col) {
- _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
- };
- Unroll<COLS>{}(storec);
- }
-};
-
-template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
-struct tinygemm_kernel_vnni<block_q8_K, block_q4_K, float, BLOCK_M, BLOCK_N, BLOCK_K> {
- static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
-
- constexpr int COLS = BLOCK_N / 16;
- const int TILE_SIZE = TILE_N * sizeof(block_q4_K) + TILE_N * 4;
-
- const block_q8_K * RESTRICT A = static_cast<const block_q8_K *>(_A);
- const char * RESTRICT B = static_cast<const char *>(_B);
-
- // a.qs: 8 groups, 32 bytes each group (m256i)
- __m512i va[8];
- // a.bsum: 8 groups, 2 bytes each group (m128i)
- __m512i va_bsum;
- __m512 vc[COLS];
- __m512 vd1;
-
- // packed_B:
- const int offset_scales = (QK_K / 2) * TILE_N;
- const int offset_mins = (QK_K / 2) * TILE_N + 8 * TILE_N;
- const int offset_d0 = (QK_K / 2) * TILE_N + 16 * TILE_N;
- const int offset_dmin = (QK_K / 2) * TILE_N + 16 * TILE_N + TILE_N * sizeof(ggml_half);
-
- const __m512i lowMask = _mm512_set1_epi8(0xF);
-
- auto loadc = [&](int col) {
- vc[col] = _mm512_setzero_ps();
- };
- Unroll<COLS>{}(loadc);
-
- // Notes: vnni formats in QK_K
- // a) quants vnni format
- // int8 {k/4, n, 4}, viewed as 2d {k/4, 4n}, k = 32
- // from {16, 32} to {8, 64}
- //
- // b) min vnni format
- // int16 {k/2, n, 2}, viewed as 2d {k/2, 2n}, k = 8
- // from {16, 8} to {4, 32}
- //
- auto compute = [&](int col, int i) {
- // load a
- if (col == 0) {
- for (int k_group = 0; k_group < QK_K / 32; ++k_group) {
- va[k_group] = _mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)(A[0 * KB + i].qs + k_group * 32)));
- }
- const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums);
- const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
- va_bsum = _mm512_castsi128_si512(q8s);
- vd1 = _mm512_set1_ps(A[0 * KB + i].d);
- }
-
-            // step 1: accumulate the quants
- __m512i acc = _mm512_setzero_si512();
- const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
- const char * b_qs = b_ptr;
- for (int k_group = 0; k_group < QK_K / 32; ++k_group) {
- __m512i vsum = _mm512_setzero_si512();
- for (int k = 0; k < 8; k += 2) {
- __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 0), va[k_group]);
- __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 1), va[k_group]);
-
- __m512i bytes = _mm512_loadu_si512((const __m512i *)b_qs);
- __m512i vb0 = _mm512_and_si512(bytes, lowMask);
- vsum = _mm512_dpbusd_epi32(vsum, vb0, va0);
- __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
- vsum = _mm512_dpbusd_epi32(vsum, vb1, va1);
-
- b_qs += 64;
- }
- // vacc += scale * (q8 @ q4)
- const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N)));
- acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale));
- }
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0)));
- vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]);
-
- // step 2: accumulate the mins
- __m512i acc_m = _mm512_setzero_si512();
- for (int k = 0; k < 4; ++k) {
- __m512i vmask = _mm512_set1_epi32(k);
- __m512i va = _mm512_permutexvar_epi32(vmask, va_bsum);
- __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_mins + k * 32)));
- acc_m = _mm512_dpwssds_epi32(acc_m, va, vb);
- }
- const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_dmin)));
- vc[col] = _mm512_fnmadd_ps(_mm512_cvtepi32_ps(acc_m), _mm512_mul_ps(vdmin, vd1), vc[col]);
- };
-
- for (int i = 0; i < KB; ++i) {
- Unroll<COLS>{}(compute, i);
- }
-
- //store to C
- auto storec = [&](int col) {
- _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
- };
- Unroll<COLS>{}(storec);
- }
-};
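
Putting the q4_K pieces together: each QK_K = 256 super-block is eight groups of 32, every group has its own (widened) scale and min, the group dot products are scaled in int32, and the per-group min correction is folded in through the activation bsums. A scalar sketch of one super-block, assuming the quants are already widened to bytes and the 16 half-group bsums have been paired into 8 per-32 sums as above (illustrative only):

    #include <cstdint>

    float dot_q8K_q4K_ref(const int8_t * q8, const uint8_t * q4,        // 256 values each
                          const uint8_t * scales, const uint8_t * mins, // 8 per super-block
                          const int16_t * bsums,                        // 8 per-32 activation sums
                          float d8, float d4, float dmin4) {
        int32_t acc = 0, acc_m = 0;
        for (int g = 0; g < 8; ++g) {
            int32_t s = 0;
            for (int k = 0; k < 32; ++k) {
                s += (int32_t)q8[g * 32 + k] * q4[g * 32 + k];
            }
            acc   += (int32_t)scales[g] * s;        // step 1: scaled group dot products
            acc_m += (int32_t)mins[g] * bsums[g];   // step 2: min correction
        }
        return d8 * d4 * (float)acc - d8 * dmin4 * (float)acc_m;
    }
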
-
-template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
-struct tinygemm_kernel_vnni<block_q8_K, block_q5_K, float, BLOCK_M, BLOCK_N, BLOCK_K> {
- static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
-
- constexpr int COLS = BLOCK_N / 16;
- const int TILE_SIZE = TILE_N * sizeof(block_q5_K) + TILE_N * 4;
-
- const block_q8_K * RESTRICT A = static_cast<const block_q8_K *>(_A);
- const char * RESTRICT B = static_cast<const char *>(_B);
-
- // a.qs: 8 groups, 32 bytes each group (m256i)
- __m512i va[8];
- // a.bsum: 8 groups, 2 bytes each group (m128i)
- __m512i va_bsum;
- __m512 vc[COLS];
- __m512 vd1;
-
- // packed_B:
- const int offset_qh = (QK_K / 2) * TILE_N;
- const int offset_scales = (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N;
- const int offset_mins = (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N + 8 * TILE_N;
- const int offset_d0 = (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N + 16 * TILE_N;
- const int offset_dmin = (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N + 16 * TILE_N + TILE_N * sizeof(ggml_half);
-
- const __m512i lowMask = _mm512_set1_epi8(0xF);
-
- auto loadc = [&](int col) {
- vc[col] = _mm512_setzero_ps();
- };
- Unroll<COLS>{}(loadc);
-
-        // Q5_K and Q4_K share the same vnni formats; refer to the notes above.
- auto compute = [&](int col, int i) {
- // load a
- if (col == 0) {
- for (int k_group = 0; k_group < QK_K / 32; ++k_group) {
- va[k_group] = _mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)(A[0 * KB + i].qs + k_group * 32)));
- }
- const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums);
- const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
- va_bsum = _mm512_castsi128_si512(q8s);
- vd1 = _mm512_set1_ps(A[0 * KB + i].d);
- }
-
-            // step 1: accumulate the quants
- __m512i acc = _mm512_setzero_si512();
- const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
- const char * b_qs = b_ptr;
- const char * b_qh = b_ptr + offset_qh;
- for (int k_group = 0; k_group < QK_K / 32; ++k_group) {
- __m512i vsum = _mm512_setzero_si512();
- __m512i hmask0 = _mm512_set1_epi8(0x1);
- __m512i hmask1 = _mm512_set1_epi8(0x2);
- __m512i hbits = _mm512_loadu_si512((const __m512i *)(b_qh + k_group * 64));
- for (int k = 0; k < 8; k += 2) {
- __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 0), va[k_group]);
- __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 1), va[k_group]);
-
- __m512i bytes = _mm512_loadu_si512((const __m512i *)b_qs);
- __m512i vb0 = _mm512_and_si512(bytes, lowMask);
- __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
-
- __m512i vh0 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask0), k), 4);
- __m512i vh1 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask1), k + 1), 4);
-
- hmask0 = _mm512_slli_epi16(hmask0, 2);
- hmask1 = _mm512_slli_epi16(hmask1, 2);
- vb0 = _mm512_add_epi8(vb0, vh0);
- vb1 = _mm512_add_epi8(vb1, vh1);
-
- vsum = _mm512_dpbusd_epi32(vsum, vb0, va0);
- vsum = _mm512_dpbusd_epi32(vsum, vb1, va1);
-
- b_qs += 64;
- }
- // vacc += scale * (q8 @ q5)
- const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N)));
- acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale));
- }
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0)));
- vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]);
-
- // step 2: accumulate the mins
- __m512i acc_m = _mm512_setzero_si512();
- for (int k = 0; k < 4; ++k) {
- __m512i vmask = _mm512_set1_epi32(k);
- __m512i va = _mm512_permutexvar_epi32(vmask, va_bsum);
- __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_mins + k * 32)));
- acc_m = _mm512_dpwssds_epi32(acc_m, va, vb);
- }
- const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_dmin)));
- vc[col] = _mm512_fnmadd_ps(_mm512_cvtepi32_ps(acc_m), _mm512_mul_ps(vdmin, vd1), vc[col]);
- };
-
- for (int i = 0; i < KB; ++i) {
- Unroll<COLS>{}(compute, i);
- }
-
- //store to C
- auto storec = [&](int col) {
- _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
- };
- Unroll<COLS>{}(storec);
- }
-};
-
-template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
-struct tinygemm_kernel_vnni<block_q8_K, block_q6_K, float, BLOCK_M, BLOCK_N, BLOCK_K> {
- static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
-
- constexpr int COLS = BLOCK_N / 16;
- const int TILE_SIZE = TILE_N * sizeof(block_q6_K);
-
- const block_q8_K * RESTRICT A = static_cast<const block_q8_K *>(_A);
- const char * RESTRICT B = static_cast<const char *>(_B);
-
- // load the 256 bytes from A to 4 avx512 vectors
- __m512i va[4];
- __m512 vc[COLS];
- __m512 vd1;
-
- // packed_B:
- const int offset_qh = (QK_K / 2) * TILE_N;
- const int offset_scales = (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N;
- const int offset_d0 = (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N + 16 * TILE_N;
-
- // compensation
- __m512i vcomp;
-
- const __m512i m32s = _mm512_set1_epi32(32);
- const __m512i lowMask = _mm512_set1_epi8(0xF);
-
- auto loadc = [&](int col) {
- vc[col] = _mm512_setzero_ps();
- };
- Unroll<COLS>{}(loadc);
-
- auto compute = [&](int col, int i) {
- if (col == 0) {
- // load a
- va[0] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 0));
- va[1] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 64));
- va[2] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 128));
- va[3] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 192));
-
- const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums);
- vcomp = _mm512_mullo_epi32(_mm512_cvtepi16_epi32(q8sums), m32s);
- vd1 = _mm512_set1_ps(A[0 * KB + i].d);
- }
-
-            // accumulate the quants
- __m512i acc = _mm512_setzero_si512();
- const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
- const char * b_qs = b_ptr;
- const char * b_qh = b_ptr + offset_qh;
- int mask = 0;
- for (int k_group = 0; k_group < QK_K / 16; ++k_group) {
- int r = k_group >> 2;
- __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
- __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
-
- __m512i vsum = _mm512_setzero_si512();
- __m512i hmask = _mm512_set1_epi8(0x3);
-
- __m512i bytes = _mm512_loadu_si512(b_qs);
- __m512i hbits = _mm512_loadu_si512(b_qh);
- __m512i vb0 = _mm512_and_si512(bytes, lowMask);
- __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
- __m512i vh0 = _mm512_slli_epi16(_mm512_and_si512(hbits, hmask), 4);
- __m512i vh1 = _mm512_slli_epi16(_mm512_and_si512(hbits, _mm512_slli_epi16(hmask, 2)), 2);
-
- vb0 = _mm512_add_epi8(vb0, vh0);
- vb1 = _mm512_add_epi8(vb1, vh1);
- vsum = _mm512_dpbusd_epi32(vsum, vb0, va0);
- vsum = _mm512_dpbusd_epi32(vsum, vb1, va1);
- b_qs += 64;
-
- va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
- va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
-
- bytes = _mm512_loadu_si512(b_qs);
- vb0 = _mm512_and_si512(bytes, lowMask);
- vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
- vh0 = _mm512_and_si512(hbits, _mm512_slli_epi16(hmask, 4));
- vh1 = _mm512_srli_epi16(_mm512_and_si512(hbits, _mm512_slli_epi16(hmask, 6)), 2);
- vb0 = _mm512_add_epi8(vb0, vh0);
- vb1 = _mm512_add_epi8(vb1, vh1);
- vsum = _mm512_dpbusd_epi32(vsum, vb0, va0);
- vsum = _mm512_dpbusd_epi32(vsum, vb1, va1);
- b_qs += 64;
- b_qh += 64;
-
- // B * A - 32 * A
- __m512i vmask = _mm512_set1_epi32(k_group);
- vsum = _mm512_sub_epi32(vsum, _mm512_permutexvar_epi32(vmask, vcomp));
-
- // vacc += scale * (q8 @ q6)
- const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N)));
- acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale));
- }
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0)));
- vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]);
- };
-
- for (int i = 0; i < KB; ++i) {
- Unroll<COLS>{}(compute, i);
- }
-
- //store to C
- auto storec = [&](int col) {
- _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
- };
- Unroll<COLS>{}(storec);
- }
-};
-
-template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
-struct tinygemm_kernel_vnni<block_q8_K, block_iq4_xs, float, BLOCK_M, BLOCK_N, BLOCK_K> {
- static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
-
- constexpr int COLS = BLOCK_N / 16;
- const int TILE_SIZE = TILE_N * sizeof(block_iq4_xs) + TILE_N * 2;
-
- const block_q8_K * RESTRICT A = static_cast<const block_q8_K *>(_A);
- const char * RESTRICT B = static_cast<const char *>(_B);
-
- // load the 256 bytes from A to 4 avx512 vectors
- __m512i va[4];
- __m512 vc[COLS];
- __m512 vd1;
-
- // packed_B:
-        const int offset_scales = (QK_K / 2) * TILE_N;
- const int offset_d0 = (QK_K / 2) * TILE_N + 8 * TILE_N;
-
- // compensation
- __m512i vcomp;
-
- const __m256i m128s = _mm256_set1_epi16(128);
- const __m512i lowMask = _mm512_set1_epi8(0xF);
-
- const __m512i values128 = _mm512_set_epi8(
- 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
- 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
- 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
- 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127
- );
- const __m512i off = _mm512_set1_epi8(static_cast<char>(0x80));
- const __m512i values256 = _mm512_add_epi8(values128, off);
-
- auto loadc = [&](int col) {
- vc[col] = _mm512_setzero_ps();
- };
- Unroll<COLS>{}(loadc);
-
- auto compute = [&](int col, int i) {
- if (col == 0) {
- // load a
- va[0] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 0));
- va[1] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 64));
- va[2] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 128));
- va[3] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 192));
-
- // compensation: 128 * A
- const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums);
- vcomp = _mm512_castsi256_si512(_mm256_madd_epi16(q8sums, m128s));
- vd1 = _mm512_set1_ps(A[0 * KB + i].d);
- }
-
-            // accumulate the quants
- __m512i acc = _mm512_setzero_si512();
- const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
- const char * b_qs = b_ptr;
- int mask = 0;
- for (int k_group = 0; k_group < QK_K / 32; ++k_group) {
- int r = k_group >> 1;
- __m512i vmask = _mm512_set1_epi32(k_group);
- __m512i vsum = _mm512_setzero_si512();
- for (int k = 0; k < 8; k += 2) {
- __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
- __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
-
- __m512i bytes = _mm512_loadu_si512(b_qs);
- __m512i vb0 = _mm512_shuffle_epi8(values256, _mm512_and_si512(bytes, lowMask));
- __m512i vb1 = _mm512_shuffle_epi8(values256, _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask));
-
- vsum = _mm512_dpbusd_epi32(vsum, vb0, va0);
- vsum = _mm512_dpbusd_epi32(vsum, vb1, va1);
- b_qs += 64;
- }
- // (B + 128) * A - 128 * A
- vsum = _mm512_sub_epi32(vsum, _mm512_permutexvar_epi32(vmask, vcomp));
-
- // vacc += scale * (q8 @ q4)
- const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N)));
- acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale));
- }
- const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0)));
- vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]);
- };
-
- for (int i = 0; i < KB; ++i) {
- Unroll<COLS>{}(compute, i);
- }
-
- //store to C
- auto storec = [&](int col) {
- _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
- };
- Unroll<COLS>{}(storec);
- }
-};
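
iq4_xs differs from the other 4-bit formats in that each nibble is an index into a non-uniform 16-entry codebook (the values128 table above) rather than a value minus 8. Adding 128 to the codebook keeps the dpbusd operand unsigned, and the bias is removed with the 128 * sum(a) compensation, just as in the q8_0 case. A scalar sketch using the codebook values hard-coded above (illustrative only):

    #include <cstdint>

    static const int8_t kvalues_iq4_ref[16] = {
        -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113
    };

    int32_t dot_q8_iq4_ref(const int8_t * a, const uint8_t * idx, int n) {
        int32_t acc = 0, comp = 0;
        for (int k = 0; k < n; ++k) {
            acc  += ((int32_t)kvalues_iq4_ref[idx[k] & 0xF] + 128) * a[k];
            comp += 128 * (int32_t)a[k];
        }
        return acc - comp;
    }
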
-
-#define LAUNCH_TINYGEMM_KERNEL_VNNI(NB_SIZE) \
- tinygemm_kernel_vnni<vec_dot_type, type, float, 1, NB_SIZE, blck_size>::apply( \
- KB, (const char *)wdata + 0 * row_size_A, \
- (const char *)src0->data + PACKED_INDEX(nb * kTilesN, 0, KB, TILE_SIZE), \
- (float *) dst->data + 0 * N + nb_start, ldc)
-
-template <typename TA, typename TB, typename TC, int BLOCK_K,
- typename std::enable_if<!is_type_qkk<TB>::value, int>::type = 0>
-void tinygemm_kernel_amx(int M, int N, int KB, const void * RESTRICT _A, const void * RESTRICT _B, TC * RESTRICT C, int ldc) {
- using packed_B_t = packed_B_type<TB>;
- const int TILE_SIZE = get_tile_size<TB>();
- const bool need_unpack = do_unpack<TB>::value;
-
- GGML_ASSERT(M <= 2 * TILE_M && N == 2 * TILE_N);
- const TA * RESTRICT A = static_cast<const TA *>(_A);
- const char * RESTRICT B = static_cast<const char *>(_B);
-
- const int m0 = std::min(M, TILE_M);
- const int m1 = std::max(M - TILE_M, 0);
- const int lda = KB * sizeof(TA);
- //const int ldb = KB * sizeof(TB);
-
- static thread_local packed_B_t Tile0[TILE_N * TILE_K];
- static thread_local packed_B_t Tile1[TILE_N * TILE_K];
- static thread_local int8_t Tile23[TILE_M * TILE_K];
-
- static thread_local int32_t TileC0[TILE_M * TILE_N * 4];
- static thread_local int32_t TileC1[TILE_M * TILE_N * 4];
-
- // double buffering C to interleave avx512 and amx
- int32_t * C_cur = TileC0;
- int32_t * C_pre = TileC1;
-
- auto Tile4 = [&](int32_t * base) { return base; };
- auto Tile5 = [&](int32_t * base) { return base + TILE_M * TILE_N; };
- auto Tile6 = [&](int32_t * base) { return base + 2 * TILE_M * TILE_N; };
- auto Tile7 = [&](int32_t * base) { return base + 3 * TILE_M * TILE_N; };
-
- if (M == 2 * TILE_M) {
- // i = 0
- const char * B_blk0 = B + PACKED_INDEX(0, 0, KB, TILE_SIZE);
- const char * B_blk1 = B + PACKED_INDEX(1, 0, KB, TILE_SIZE);
- if (need_unpack) {
- unpack_B<TB>(Tile0, B_blk0);
- _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK);
- } else {
- _tile_loadd(TMM0, B_blk0, TILE_N * VNNI_BLK);
- }
-
- _tile_zero(TMM4);
- _tile_loadd(TMM2, A[0].qs, lda);
- _tile_dpbssd(TMM4, TMM2, TMM0);
- _tile_stored(TMM4, Tile4(C_pre), TILE_N * sizeof(int32_t));
-
- _tile_zero(TMM5);
- _tile_loadd(TMM3, A[TILE_M * KB + 0].qs, lda);
- _tile_dpbssd(TMM5, TMM3, TMM0);
- _tile_stored(TMM5, Tile5(C_pre), TILE_N * sizeof(int32_t));
-
- if (need_unpack) {
-            unpack_B<TB>(Tile1, B_blk1);
- _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK);
- } else {
- _tile_loadd(TMM1, B_blk1, TILE_N * VNNI_BLK);
- }
-
- _tile_zero(TMM6);
- _tile_dpbssd(TMM6, TMM2, TMM1);
- _tile_stored(TMM6, Tile6(C_pre), TILE_N * sizeof(int32_t));
-
- _tile_zero(TMM7);
- _tile_dpbssd(TMM7, TMM3, TMM1);
- _tile_stored(TMM7, Tile7(C_pre), TILE_N * sizeof(int32_t));
-
- for (int i = 1; i < KB; ++i) {
- // index of previous iter
- const int ii = i - 1;
- const char * B_blk0 = B + PACKED_INDEX(0, i, KB, TILE_SIZE);
- const char * B_blk1 = B + PACKED_INDEX(1, i, KB, TILE_SIZE);
- GGML_DISPATCH_BOOL(ii > 0, is_acc, [&] {
- if (need_unpack) {
- unpack_B<TB>(Tile0, B_blk0);
- _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK);
- } else {
- _tile_loadd(TMM0, B_blk0, TILE_N * VNNI_BLK);
- }
- _tile_zero(TMM4);
- _tile_loadd(TMM2, A[i].qs, lda);
- acc_C<TA, TB, is_acc>::apply(C, ldc, Tile4(C_pre), &A[ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M);
-
- _tile_dpbssd(TMM4, TMM2, TMM0);
- _tile_stored(TMM4, Tile4(C_cur), TILE_N * sizeof(int32_t));
-
- _tile_zero(TMM5);
- _tile_loadd(TMM3, A[TILE_M * KB + i].qs, lda);
- acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc, ldc, Tile5(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M);
-
- _tile_dpbssd(TMM5, TMM3, TMM0);
- _tile_stored(TMM5, Tile5(C_cur), TILE_N * sizeof(int32_t));
-
- if (need_unpack) {
- unpack_B<TB>(Tile1, B_blk1);
- _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK);
- } else {
- _tile_loadd(TMM1, B_blk1, TILE_N * VNNI_BLK);
- }
- _tile_zero(TMM6);
- acc_C<TA, TB, is_acc>::apply(C + TILE_N, ldc, Tile6(C_pre), &A[ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M);
-
- _tile_dpbssd(TMM6, TMM2, TMM1);
- _tile_stored(TMM6, Tile6(C_cur), TILE_N * sizeof(int32_t));
-
- _tile_zero(TMM7);
- acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc + TILE_N, ldc, Tile7(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M);
-
- _tile_dpbssd(TMM7, TMM3, TMM1);
- _tile_stored(TMM7, Tile7(C_cur), TILE_N * sizeof(int32_t));
-
- std::swap(C_cur, C_pre);
- });
- }
- // final accumulation
- {
- int ii = KB - 1;
- acc_C<TA, TB, true>::apply(C, ldc, Tile4(C_pre), &A[ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M);
- acc_C<TA, TB, true>::apply(C + TILE_M * ldc, ldc, Tile5(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M);
- acc_C<TA, TB, true>::apply(C + TILE_N, ldc, Tile6(C_pre), &A[ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M);
- acc_C<TA, TB, true>::apply(C + TILE_M * ldc + TILE_N, ldc, Tile7(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M);
- }
- } else {
- for (int i = 0; i < KB; ++i) {
- _tile_zero(TMM4);
- _tile_zero(TMM6);
- if (m1 != 0) {
- _tile_zero(TMM5);
- _tile_zero(TMM7);
- }
-
- const char * B_blk0 = B + PACKED_INDEX(0, i, KB, TILE_SIZE);
- const char * B_blk1 = B + PACKED_INDEX(1, i, KB, TILE_SIZE);
- if (need_unpack) {
- unpack_B<TB>(Tile0, B_blk0);
- _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK);
- } else {
- _tile_loadd(TMM0, B_blk0, TILE_N * VNNI_BLK);
- }
-
- if (need_unpack) {
- unpack_B<TB>(Tile1, B_blk1);
- _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK);
- } else {
- _tile_loadd(TMM1, B_blk1, TILE_N * VNNI_BLK);
- }
-
- if (m0 == TILE_M) {
- _tile_loadd(TMM2, A[i].qs, lda);
- } else {
- unpack_A(Tile23, &A[i], KB, m0);
- _tile_loadd(TMM2, Tile23, TILE_K);
- }
-
- _tile_dpbssd(TMM4, TMM2, TMM0);
- _tile_dpbssd(TMM6, TMM2, TMM1);
-
- _tile_stored(TMM4, Tile4(C_cur), TILE_N * sizeof(int32_t));
- _tile_stored(TMM6, Tile6(C_cur), TILE_N * sizeof(int32_t));
-
- GGML_DISPATCH_BOOL(i > 0, is_acc, [&] {
- acc_C<TA, TB, is_acc>::apply(C, ldc, Tile4(C_cur), &A[i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m0);
- acc_C<TA, TB, is_acc>::apply(C + TILE_N, ldc, Tile6(C_cur), &A[i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m0);
- });
-
- if (m1 != 0) {
- unpack_A(Tile23, &A[TILE_M * KB + i], KB, m1);
- _tile_loadd(TMM3, Tile23, TILE_K);
-
- _tile_dpbssd(TMM5, TMM3, TMM0);
- _tile_dpbssd(TMM7, TMM3, TMM1);
- _tile_stored(TMM5, Tile5(C_cur), TILE_N * sizeof(int32_t));
- _tile_stored(TMM7, Tile7(C_cur), TILE_N * sizeof(int32_t));
- GGML_DISPATCH_BOOL(i > 0, is_acc, [&] {
- acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc, ldc, Tile5(C_cur), &A[TILE_M * KB + i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m1);
- acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc + TILE_N, ldc, Tile7(C_cur), &A[TILE_M * KB + i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m1);
- });
- }
- }
- }
- return;
-}
-
-template <typename TA, typename TB, typename TC, int BLOCK_K,
- typename std::enable_if<is_type_qkk<TB>::value, int>::type = 0>
-void tinygemm_kernel_amx(int M, int N, int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
- static_assert(std::is_same<TA, block_q8_K>::value);
- const int TILE_SIZE = get_tile_size<TB>();
-
- GGML_ASSERT(M <= 2 * TILE_M && N == 2 * TILE_N);
- const TA * RESTRICT A = static_cast<const TA *>(_A);
- const char * RESTRICT B = static_cast<const char *>(_B);
-
- const int m0 = std::min(M, TILE_M);
- const int m1 = std::max(M - TILE_M, 0);
- //const int lda = KB * sizeof(TA);
-
- static thread_local int8_t Tile0[TILE_N * TILE_K];
- static thread_local int8_t Tile1[TILE_N * TILE_K];
- static thread_local int8_t Tile23[TILE_M * TILE_K];
-
- // mat mul result for each group
- static thread_local int32_t Tile4[TILE_M * TILE_N];
- static thread_local int32_t Tile5[TILE_M * TILE_N];
- static thread_local int32_t Tile6[TILE_M * TILE_N];
- static thread_local int32_t Tile7[TILE_M * TILE_N];
-
- // sum of each QK_K block, contains 8 groups, int32
- static thread_local int32_t Sumi4[TILE_M * TILE_N];
- static thread_local int32_t Sumi5[TILE_M * TILE_N];
- static thread_local int32_t Sumi6[TILE_M * TILE_N];
- static thread_local int32_t Sumi7[TILE_M * TILE_N];
-
- const int k_group_size = std::is_same<TB, block_q6_K>::value ? 16 : 32;
- for (int i = 0; i < KB; ++i) {
- // step 1: accumulate the quants across 8 groups, each group with 32
- for (int k = 0; k < QK_K / k_group_size; ++k) {
- GGML_DISPATCH_BOOL(k > 0, is_acc, [&] {
- _tile_zero(TMM4);
- _tile_zero(TMM6);
-
- unpack_B<TB>(Tile0, B + PACKED_INDEX(0, i, KB, TILE_SIZE), k);
- _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK);
-
- unpack_B<TB>(Tile1, B + PACKED_INDEX(1, i, KB, TILE_SIZE), k);
- _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK);
-
- unpack_A<TB>(Tile23, &A[i], KB, k, m0);
- _tile_loadd(TMM2, Tile23, TILE_K);
-
- _tile_dpbssd(TMM4, TMM2, TMM0);
- _tile_dpbssd(TMM6, TMM2, TMM1);
-
- _tile_stored(TMM4, Tile4, TILE_N * sizeof(int32_t));
- _tile_stored(TMM6, Tile6, TILE_N * sizeof(int32_t));
-
- scale_C<TB, is_acc>(Tile4, Sumi4, B + PACKED_INDEX(0, i, KB, TILE_SIZE), k, m0);
- scale_C<TB, is_acc>(Tile6, Sumi6, B + PACKED_INDEX(1, i, KB, TILE_SIZE), k, m0);
-
- if (m1 != 0) {
- _tile_zero(TMM5);
- _tile_zero(TMM7);
-
- unpack_A<TB>(Tile23, &A[TILE_M * KB + i], KB, k, m1);
- _tile_loadd(TMM3, Tile23, TILE_K);
-
- _tile_dpbssd(TMM5, TMM3, TMM0);
- _tile_dpbssd(TMM7, TMM3, TMM1);
-
- _tile_stored(TMM5, Tile5, TILE_N * sizeof(int32_t));
- _tile_stored(TMM7, Tile7, TILE_N * sizeof(int32_t));
-
- scale_C<TB, is_acc>(Tile5, Sumi5, B + PACKED_INDEX(0, i, KB, TILE_SIZE), k, m1);
- scale_C<TB, is_acc>(Tile7, Sumi7, B + PACKED_INDEX(1, i, KB, TILE_SIZE), k, m1);
- }
- });
- }
-
- // step 2: accmulate the mins
- GGML_DISPATCH_BOOL(i > 0, is_acc, [&] {
- acc_C<TA, TB, is_acc>::apply(C, ldc, Sumi4, &A[i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m0);
- acc_C<TA, TB, is_acc>::apply(C + TILE_N, ldc, Sumi6, &A[i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m0);
- if (m1 != 0) {
- acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc, ldc, Sumi5, &A[TILE_M * KB + i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m1);
- acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc + TILE_N, ldc, Sumi7, &A[TILE_M * KB + i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m1);
- }
- });
- }
- return;
-}
-
-} // anonymous namespace
-
-// get the packed tensor size for quantized weights
-size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor) {
- const enum ggml_type TYPE = tensor->type;
-
- const int K = tensor->ne[0]; // ne0: in_features
- const int N = tensor->ne[1]; // ne1: out_features
-
- auto get_tensor_size = [&] {
- size_t row_size_B{0};
- GGML_DISPATCH_QTYPES(TYPE, [&] {
- row_size_B = get_row_size<type, blck_size>(K);
- });
- return N * row_size_B;
- };
-
- if (qtype_has_amx_kernels(TYPE)) {
- return get_tensor_size();
- } else {
- // for f16, bf16 we don't do packing
- return ggml_nbytes(tensor);
- }
-}
-
-// pack weight to vnni format
-void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
-
- size_t alloc_size = ggml_backend_amx_get_alloc_size(tensor);
- GGML_ASSERT(alloc_size == size);
-
- const enum ggml_type TYPE = tensor->type;
-
- const int K = tensor->ne[0]; // ne0: in_features
- const int N = tensor->ne[1]; // ne1: out_features
-
-#if defined(_OPENMP)
- // the buffer ctx is not initialized when .set_tensor is called
- int n_threads = omp_get_num_threads();
-#else
- int n_threads = 1;
-#endif
-
- GGML_DISPATCH_QTYPES(TYPE, [&] {
- convert_B_packed_format<type, blck_size>((void *)((char *)tensor->data + offset), (const type *)data, N, K, n_threads);
- });
-}
-
-// NB: mixed dtype gemm with Advanced Matrix Extensions (Intel AMX)
-//
-// src0: weight in shape of {N, K}, quantized
-// src1: input in shape of {M, K}, float32
-// dst: output in shape of {M, N}, float32
-//
-// the function performs: dst = src1 @ src0.T
-//
-void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor * dst) {
- struct ggml_tensor * src0 = dst->src[0];
- struct ggml_tensor * src1 = dst->src[1];
-
- const enum ggml_type TYPE = src0->type;
-
- const int n_threads = ctx->n_threads;
-
- // f16 only has avx512 kernels for now,
- // amx kernels will be added once 6th gen xeon is released.
- const bool is_floating_type = TYPE == GGML_TYPE_F16;
-
- const int M = dst->ne[1];
- const int N = dst->ne[0];
- const int K = src0->ne[0];
- const int ldc = dst->nb[1] / dst->nb[0];
-
- if (is_floating_type) {
- constexpr int BLOCK_M = 4;
- constexpr int BLOCK_N = 6;
- const int MB = div_up(M, BLOCK_M);
- const int NB = div_up(N, BLOCK_N);
-
- parallel_for(n_threads, MB * NB, [&](int begin, int end) {
- GGML_DISPATCH_FLOATING_TYPES(TYPE, [&] {
- for (int i = begin; i < end; ++i) {
- int mb = i / NB;
- int nb = i % NB;
-
- int mb_start = mb * BLOCK_M;
- int mb_size = std::min(BLOCK_M, M - mb_start);
- int nb_start = nb * BLOCK_N;
- int nb_size = std::min(BLOCK_N, N - nb_start);
-
- switch (mb_size << 4 | nb_size) {
- case 0x12: LAUNCH_TINYGEMM_KERNEL_AVX(1, 2); break;
- case 0x14: LAUNCH_TINYGEMM_KERNEL_AVX(1, 4); break;
- case 0x16: LAUNCH_TINYGEMM_KERNEL_AVX(1, 6); break;
- case 0x22: LAUNCH_TINYGEMM_KERNEL_AVX(2, 2); break;
- case 0x24: LAUNCH_TINYGEMM_KERNEL_AVX(2, 4); break;
- case 0x26: LAUNCH_TINYGEMM_KERNEL_AVX(2, 6); break;
- case 0x32: LAUNCH_TINYGEMM_KERNEL_AVX(3, 2); break;
- case 0x34: LAUNCH_TINYGEMM_KERNEL_AVX(3, 4); break;
- case 0x36: LAUNCH_TINYGEMM_KERNEL_AVX(3, 6); break;
- case 0x42: LAUNCH_TINYGEMM_KERNEL_AVX(4, 2); break;
- case 0x44: LAUNCH_TINYGEMM_KERNEL_AVX(4, 4); break;
- case 0x46: LAUNCH_TINYGEMM_KERNEL_AVX(4, 6); break;
- default: fprintf(stderr, "Unexpected block size!\n");
- }
- }
- });
- });
- return;
- }
-
- // pointer to work space, used convert A from float to quantized type
- void * wdata = nullptr;
-
- //TODO: performance improvement: merge quant A
- GGML_DISPATCH_QTYPES(TYPE, [&] {
- const size_t row_size_A = K / blck_size * sizeof(vec_dot_type);
- const size_t desired_wsize = M * row_size_A;
- if (ctx->work_size < desired_wsize) {
- ctx->work_data.reset(new char[desired_wsize]);
- ctx->work_size = desired_wsize;
- }
- wdata = ctx->work_data.get();
-
- // Q4_0, Q4_1, Q8_0 handles 1 TILE_K per blck_size
- // Q4_K, Q5_K, Q6_K, IQ4_XS handles 8 TILE_K per blck_size
- GGML_ASSERT(TILE_K == blck_size || TILE_K * 8 == blck_size);
-
- const float * A_data = static_cast<const float *>(src1->data);
- for (int m = 0; m < M; ++m) {
- from_float<vec_dot_type>(A_data + m * K, (char *)wdata + m * row_size_A, K);
- }
- });
-
- if (M == 1) {
- // MB = 1 and handle 8 tiles in each block
- constexpr int kTilesN = 4;
- constexpr int BLOCK_N = TILE_N * kTilesN;
- const int NB = div_up(N, BLOCK_N);
-
- parallel_for(n_threads, NB, [&](int begin, int end) {
- GGML_DISPATCH_QTYPES(TYPE, [&] {
- const int KB = K / blck_size;
- const int TILE_SIZE = get_tile_size<type>();
- const int row_size_A = KB * sizeof(vec_dot_type);
- for (int i = begin; i < end; ++i) {
- int nb = i;
- int nb_start = nb * BLOCK_N;
- int nb_size = std::min(BLOCK_N, N - nb_start); // 32, 64, 96
-
- switch (nb_size) {
- //case 160: LAUNCH_TINYGEMM_KERNEL_VNNI(160); break;
- case 128: LAUNCH_TINYGEMM_KERNEL_VNNI(128); break;
- case 96: LAUNCH_TINYGEMM_KERNEL_VNNI(96); break;
- case 64: LAUNCH_TINYGEMM_KERNEL_VNNI(64); break;
- case 32: LAUNCH_TINYGEMM_KERNEL_VNNI(32); break;
- default: fprintf(stderr, "Unexpected n block size!\n");
- }
- }
- });
- });
- return;
- }
-
- // handle 4 tiles at a tile
- constexpr int BLOCK_M = TILE_M * 2;
- constexpr int BLOCK_N = TILE_N * 2;
- const int MB = div_up(M, BLOCK_M);
- const int NB = div_up(N, BLOCK_N);
-
- parallel_for(n_threads, MB * NB, [&](int begin, int end) {
- // init tile config for each thread
- ggml_tile_config_init();
-
- GGML_DISPATCH_QTYPES(TYPE, [&] {
- const int KB = K / blck_size;
- const int TILE_SIZE = get_tile_size<type>();
- const int row_size_A = KB * sizeof(vec_dot_type);
-
- for (int i = begin; i < end; ++i) {
- int mb = i / NB;
- int nb = i % NB;
-
- int mb_start = mb * BLOCK_M;
- int mb_size = std::min(BLOCK_M, M - mb_start);
- int nb_start = nb * BLOCK_N;
- int nb_size = BLOCK_N;
-
- tinygemm_kernel_amx<vec_dot_type, type, float, blck_size>(
- mb_size, nb_size, KB,
- (const char *)wdata + mb_start * row_size_A,
- (const char *)src0->data + PACKED_INDEX(nb * 2, 0, KB, TILE_SIZE),
- (float *) dst->data + mb_start * N + nb_start, ldc);
- }
- });
- });
-}
-
-#else // if defined(__AMX_INT8__)
-
-void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor * dst) {
- fprintf(stderr, "GGML is not compiled with AMX support!\n");
-
- GGML_UNUSED(ctx);
- GGML_UNUSED(dst);
-}
-
-#endif // if defined(__AMX_INT8__)
+++ /dev/null
-#pragma once
-#include "common.h"
-#include <stdint.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor);
-
-void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
-
-void ggml_backend_amx_mul_mat(ggml_backend_amx_context * ctx, struct ggml_tensor * dst);
-
-#ifdef __cplusplus
-}
-#endif
#include "ggml-rpc.h"
#endif
-#ifdef GGML_USE_AMX
-# include "ggml-amx.h"
-#endif
-
#ifdef GGML_USE_CANN
#include "ggml-cann.h"
#endif
#ifdef GGML_USE_RPC
register_backend(ggml_backend_rpc_reg());
#endif
-#ifdef GGML_USE_AMX
- register_backend(ggml_backend_amx_reg());
-#endif
#ifdef GGML_USE_KOMPUTE
register_backend(ggml_backend_kompute_reg());
#endif
if (tensor->buffer || (tensor->view_src && tensor->view_src->buffer)) {
// since the tensor is pre-allocated, it cannot be moved to another backend
- GGML_ABORT("pre-allocated tensor (%s) in a backend that cannot run the operation", tensor->name);
+ ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
+ GGML_ABORT("pre-allocated tensor (%s) in a buffer (%s) that cannot run the operation (%s)", tensor->name, ggml_backend_buffer_name(buffer), ggml_op_name(tensor->op));
}
// graph input
-ggml_add_backend_library(ggml-cpu
- ggml-cpu.c
- ggml-cpu.cpp
- ggml-cpu-aarch64.c
- ggml-cpu-aarch64.h
- ggml-cpu-quants.c
- ggml-cpu-quants.h
- )
-
+ggml_add_backend_library(ggml-cpu)
+
+list (APPEND GGML_CPU_SOURCES
+ ggml-cpu.c
+ ggml-cpu.cpp
+ ggml-cpu-aarch64.c
+ ggml-cpu-aarch64.h
+ ggml-cpu-quants.c
+ ggml-cpu-quants.h
+ amx/amx.cpp
+ amx/amx.h
+ amx/mmq.cpp
+ amx/mmq.h
+ ggml-cpu-impl.h
+ )
+
+target_compile_features(ggml-cpu PRIVATE c_std_11 cxx_std_17)
target_include_directories(ggml-cpu PRIVATE .)
if (APPLE AND GGML_ACCELERATE)
if (ACCELERATE_FRAMEWORK)
message(STATUS "Accelerate framework found")
- add_compile_definitions(GGML_USE_ACCELERATE)
- add_compile_definitions(ACCELERATE_NEW_LAPACK)
- add_compile_definitions(ACCELERATE_LAPACK_ILP64)
+ target_compile_definitions(ggml-cpu PRIVATE GGML_USE_ACCELERATE)
+ target_compile_definitions(ggml-cpu PRIVATE ACCELERATE_NEW_LAPACK)
+ target_compile_definitions(ggml-cpu PRIVATE ACCELERATE_LAPACK_ILP64)
target_link_libraries(ggml-cpu PRIVATE ${ACCELERATE_FRAMEWORK})
else()
if (OpenMP_FOUND)
message(STATUS "OpenMP found")
- add_compile_definitions(GGML_USE_OPENMP)
+ target_compile_definitions(ggml-cpu PRIVATE GGML_USE_OPENMP)
target_link_libraries(ggml-cpu PRIVATE OpenMP::OpenMP_C OpenMP::OpenMP_CXX)
-
- # FIXME: should be replaced with a compiler id check
- #if (GGML_MUSA)
- # list(APPEND GGML_CPU_EXTRA_INCLUDES "/usr/lib/llvm-14/lib/clang/14.0.0/include")
- # list(APPEND GGML_CPU_EXTRA_LIBS_PRIVATE "/usr/lib/llvm-14/lib/libomp.so")
- #endif()
else()
message(WARNING "OpenMP not found")
endif()
if (GGML_LLAMAFILE)
message(STATUS "Using llamafile")
- add_compile_definitions(GGML_USE_LLAMAFILE)
+ target_compile_definitions(ggml-cpu PRIVATE GGML_USE_LLAMAFILE)
- target_sources(ggml-cpu PRIVATE
- llamafile/sgemm.cpp
- llamafile/sgemm.h)
+ list(APPEND GGML_CPU_SOURCES
+ llamafile/sgemm.cpp
+ llamafile/sgemm.h)
endif()
if (GGML_CPU_HBM)
message(STATUS "Using memkind for CPU HBM")
- add_compile_definitions(GGML_USE_CPU_HBM)
+ target_compile_definitions(ggml-cpu PRIVATE GGML_USE_CPU_HBM)
target_link_libraries(ggml-cpu PUBLIC memkind)
endif()
message(STATUS "ARM detected")
if (MSVC)
- add_compile_definitions(__aarch64__) # MSVC defines _M_ARM64 instead
- add_compile_definitions(__ARM_NEON)
- add_compile_definitions(__ARM_FEATURE_FMA)
+ list(APPEND ARCH_DEFINITIONS __aarch64__) # MSVC defines _M_ARM64 instead
+ list(APPEND ARCH_DEFINITIONS __ARM_NEON)
+ list(APPEND ARCH_DEFINITIONS __ARM_FEATURE_FMA)
set(CMAKE_REQUIRED_FLAGS_PREV ${CMAKE_REQUIRED_FLAGS})
string(JOIN " " CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS} "/arch:armv8.2")
check_cxx_source_compiles("#include <arm_neon.h>\nint main() { int8x16_t _a, _b; int32x4_t _s = vdotq_s32(_s, _a, _b); return 0; }" GGML_COMPILER_SUPPORT_DOTPROD)
if (GGML_COMPILER_SUPPORT_DOTPROD)
- add_compile_definitions(__ARM_FEATURE_DOTPROD)
+ list(APPEND ARCH_DEFINITIONS __ARM_FEATURE_DOTPROD)
message(STATUS "ARM feature DOTPROD enabled")
endif ()
check_cxx_source_compiles("#include <arm_neon.h>\nint main() { int8x16_t _a, _b; int32x4_t _s = vmmlaq_f32(_s, _a, _b); return 0; }" GGML_COMPILER_SUPPORT_MATMUL_INT8)
if (GGML_COMPILER_SUPPORT_MATMUL_INT8)
- add_compile_definitions(__ARM_FEATURE_MATMUL_INT8)
+ list(APPEND ARCH_DEFINITIONS __ARM_FEATURE_MATMUL_INT8)
message(STATUS "ARM feature MATMUL_INT8 enabled")
endif ()
check_cxx_source_compiles("#include <arm_neon.h>\nint main() { float16_t _a; float16x8_t _s = vdupq_n_f16(_a); return 0; }" GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC)
if (GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC)
- add_compile_definitions(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+ list(APPEND ARCH_DEFINITIONS __ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
message(STATUS "ARM feature FP16_VECTOR_ARITHMETIC enabled")
endif ()
check_cxx_source_compiles("#include <arm_neon.h>\nint main() { int8x16_t _a, _b; int32x4_t _s = vdotq_s32(_s, _a, _b); return 0; }" GGML_COMPILER_SUPPORT_DOTPROD)
if (GGML_COMPILER_SUPPORT_DOTPROD)
set(MARCH_FLAGS "${MARCH_FLAGS}+dotprod")
- add_compile_definitions(__ARM_FEATURE_DOTPROD)
+ list(APPEND ARCH_DEFINITIONS __ARM_FEATURE_DOTPROD)
message(STATUS "ARM feature DOTPROD enabled")
endif ()
check_cxx_source_compiles("#include <arm_neon.h>\nint main() { int8x16_t _a, _b; int32x4_t _s = vmmlaq_s32(_s, _a, _b); return 0; }" GGML_COMPILER_SUPPORT_MATMUL_INT8)
if (GGML_COMPILER_SUPPORT_MATMUL_INT8)
set(MARCH_FLAGS "${MARCH_FLAGS}+i8mm")
- add_compile_definitions(__ARM_FEATURE_MATMUL_INT8)
+ list(APPEND ARCH_DEFINITIONS __ARM_FEATURE_MATMUL_INT8)
message(STATUS "ARM feature MATMUL_INT8 enabled")
endif ()
if (MSVC)
# instruction set detection for MSVC only
if (GGML_NATIVE)
- # TODO: improve, should not reference files from the parent folder
include(cmake/FindSIMD.cmake)
endif ()
if (GGML_AVX512)
# macros corresponding to the extensions.
# Do it manually.
if (GGML_AVX512_VBMI)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>)
+ list(APPEND ARCH_DEFINITIONS __AVX512VBMI__)
if (CMAKE_C_COMPILER_ID STREQUAL "Clang")
list(APPEND ARCH_FLAGS -mavx512vbmi)
endif()
endif()
if (GGML_AVX512_VNNI)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
+ list(APPEND ARCH_DEFINITIONS __AVX512VNNI__)
if (CMAKE_C_COMPILER_ID STREQUAL "Clang")
list(APPEND ARCH_FLAGS -mavx512vnni)
endif()
endif()
if (GGML_AVX512_BF16)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512BF16__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512BF16__>)
+ list(APPEND ARCH_DEFINITIONS __AVX512BF16__)
if (CMAKE_C_COMPILER_ID STREQUAL "Clang")
list(APPEND ARCH_FLAGS -mavx512bf16)
endif()
endif()
if (GGML_AMX_TILE)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_TILE__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_TILE__>)
+ list(APPEND ARCH_DEFINITIONS __AMX_TILE__)
endif()
if (GGML_AMX_INT8)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_INT8__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_INT8__>)
+ list(APPEND ARCH_DEFINITIONS __AMX_INT8__)
endif()
if (GGML_AMX_BF16)
- add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AMX_BF16__>)
- add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AMX_BF16__>)
+ list(APPEND ARCH_DEFINITIONS __AMX_BF16__)
endif()
elseif (GGML_AVX2)
list(APPEND ARCH_FLAGS /arch:AVX2)
list(APPEND ARCH_FLAGS -mcpu=powerpc64le)
else()
list(APPEND ARCH_FLAGS -mcpu=native -mtune=native)
- #TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be)
+ # TODO: Add targets for Power8/Power9 (Altivec/VSX) and Power10(MMA) and query for big endian systems (ppc64/le/be)
endif()
elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
message(STATUS "loongarch64 detected")
if (GGML_CPU_AARCH64)
message(STATUS "Using runtime weight conversion of Q4_0 to Q4_0_x_x to enable optimized GEMM/GEMV kernels")
- add_compile_definitions(GGML_USE_CPU_AARCH64)
+ target_compile_definitions(ggml-cpu PRIVATE GGML_USE_CPU_AARCH64)
endif()
-target_compile_options(ggml-cpu PRIVATE "$<$<COMPILE_LANGUAGE:CXX>:${ARCH_FLAGS}>")
-target_compile_options(ggml-cpu PRIVATE "$<$<COMPILE_LANGUAGE:C>:${ARCH_FLAGS}>")
+target_sources(ggml-cpu PRIVATE ${GGML_CPU_SOURCES})
+set_source_files_properties(${GGML_CPU_SOURCES} PROPERTIES COMPILE_OPTIONS "${ARCH_FLAGS}")
+set_source_files_properties(${GGML_CPU_SOURCES} PROPERTIES COMPILE_DEFINITIONS "${ARCH_DEFINITIONS}")
if (EMSCRIPTEN)
set_target_properties(ggml-cpu PROPERTIES COMPILE_FLAGS "-msimd128")
--- /dev/null
+#include "amx.h"
+#include "common.h"
+#include "mmq.h"
+#include "ggml-backend-impl.h"
+#include "ggml-backend.h"
+#include "ggml-impl.h"
+#include "ggml-cpu.h"
+
+#if defined(__gnu_linux__)
+#include <sys/syscall.h>
+#include <unistd.h>
+#endif
+
+#include <cstdlib>
+#include <cstring>
+#include <memory>
+
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+
+// AMX buffer interface
+static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+ free(buffer->context);
+}
+
+static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) {
+ return (void *)(buffer->context);
+}
+
+static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+ memset((char *)tensor->data + offset, value, size);
+
+ GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ if (qtype_has_amx_kernels(tensor->type)) {
+ ggml_backend_amx_convert_weight(tensor, data, offset, size);
+ } else {
+ memcpy((char *)tensor->data + offset, data, size);
+ }
+
+ GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
+ memcpy(data, (const char *)tensor->data + offset, size);
+
+ GGML_UNUSED(buffer);
+}
+
+static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
+ if (ggml_backend_buffer_is_host(src->buffer)) {
+ if (qtype_has_amx_kernels(src->type)) {
+ ggml_backend_amx_convert_weight(dst, src->data, 0, ggml_nbytes(dst));
+ } else {
+ memcpy(dst->data, src->data, ggml_nbytes(src));
+ }
+ return true;
+ }
+ return false;
+
+ GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
+ memset(buffer->context, value, buffer->size);
+}
+
+static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = {
+ /* .free_buffer = */ ggml_backend_amx_buffer_free_buffer,
+ /* .get_base = */ ggml_backend_amx_buffer_get_base,
+ /* .init_tensor = */ NULL, // no initialization required
+ /* .memset_tensor = */ ggml_backend_amx_buffer_memset_tensor,
+ /* .set_tensor = */ ggml_backend_amx_buffer_set_tensor,
+ /* .get_tensor = */ ggml_backend_amx_buffer_get_tensor,
+ /* .cpy_tensor = */ ggml_backend_amx_buffer_cpy_tensor,
+ /* .clear = */ ggml_backend_amx_buffer_clear,
+ /* .reset = */ NULL,
+};
+
+static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+ return "AMX";
+
+ GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_t ggml_backend_amx_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+ void * data = aligned_alloc(TENSOR_ALIGNMENT, size);
+ if (data == NULL) {
+ fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
+ return NULL;
+ }
+
+ return ggml_backend_buffer_init(buft, ggml_backend_amx_buffer_interface, data, size);
+}
+
+static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
+ return TENSOR_ALIGNMENT;
+
+ GGML_UNUSED(buft);
+}
+
+static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor* tensor) {
+ return ggml_backend_amx_get_alloc_size(tensor);
+
+ GGML_UNUSED(buft);
+}
+
+static bool ggml_backend_amx_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
+ return false;
+
+ GGML_UNUSED(buft);
+}
+
+#define ARCH_GET_XCOMP_PERM 0x1022
+#define ARCH_REQ_XCOMP_PERM 0x1023
+#define XFEATURE_XTILECFG 17
+#define XFEATURE_XTILEDATA 18
+
+static bool ggml_amx_init() {
+#if defined(__gnu_linux__)
+ if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA)) {
+ fprintf(stderr, "AMX is not ready to be used!\n");
+ return false;
+ }
+ return true;
+#elif defined(_WIN32)
+ return true;
+#endif
+}
+
+ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() {
+ static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = {
+ /* .iface = */ {
+ /* .get_name = */ ggml_backend_amx_buffer_type_get_name,
+ /* .alloc_buffer = */ ggml_backend_amx_buffer_type_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_amx_buffer_type_get_alignment,
+ /* .get_max_size = */ NULL, // defaults to SIZE_MAX
+ /* .get_alloc_size = */ ggml_backend_amx_buffer_type_get_alloc_size,
+ /* .is_host = */ ggml_backend_amx_buffer_type_is_host,
+ },
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
+ /* .context = */ NULL,
+ };
+
+ if (!ggml_amx_init()) {
+ return NULL;
+ }
+
+ return &ggml_backend_buffer_type_amx;
+}
+
+bool ggml_backend_amx_buft_is_amx(ggml_backend_buffer_type_t buft) {
+ return buft->iface.get_name == ggml_backend_amx_buffer_type_get_name;
+}
+
+bool ggml_backend_amx_device_supports_op(const struct ggml_tensor * op) {
+ // handle only 2d gemm for now
+ auto is_contiguous_2d = [](const struct ggml_tensor * t) {
+ return ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1;
+ };
+
+ switch (op->op) {
+ case GGML_OP_NONE:
+ case GGML_OP_RESHAPE:
+ case GGML_OP_VIEW:
+ case GGML_OP_PERMUTE:
+ case GGML_OP_TRANSPOSE:
+ return true;
+
+ case GGML_OP_MUL_MAT: {
+ const struct ggml_tensor * src0 = op->src[0];
+ const struct ggml_tensor * src1 = op->src[1];
+
+ const enum ggml_type type = src0->type;
+ const int64_t ne0 = op->ne[0];
+
+        // amx kernels are enabled for Q4_0, Q4_1, Q8_0, F16
+        // Q4_K, Q5_K, Q6_K, IQ4_XS are enabled for QK_K = 256
+ bool has_amx_kernels = qtype_has_amx_kernels(type) || (type == GGML_TYPE_F16);
+
+ bool can_use_amx =
+ is_contiguous_2d(src0) && // src0 must be contiguous
+ is_contiguous_2d(src1) && // src1 must be contiguous
+ src1->type == GGML_TYPE_F32 && // src1 must be float32
+ has_amx_kernels && // with amx kernel impls
+ ne0 % (TILE_N * 2) == 0; // out_features is 32x
+
+ return can_use_amx;
+ }
+ default:
+ return false;
+ }
+}
+
+#endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__)
--- /dev/null
+#include "ggml-backend.h"
+#include "ggml-cpu-impl.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+
+ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void);
+bool ggml_backend_amx_buft_is_amx(ggml_backend_buffer_type_t buft);
+bool ggml_backend_amx_device_supports_op(const struct ggml_tensor * op);
+void ggml_backend_amx_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+size_t ggml_backend_amx_desired_wsize(const struct ggml_tensor * dst);
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+#pragma once
+
+#include "ggml.h"
+#include "ggml-cpu-impl.h"
+
+#include <algorithm>
+#include <memory>
+#include <type_traits>
+
+#if defined(_OPENMP)
+#include <omp.h>
+#endif
+
+#define TILE_M 16
+#define TILE_N 16
+#define TILE_K 32
+#define VNNI_BLK 4
+
+#define AMX_BLK_SIZE 32
+
+#define TMM0 0
+#define TMM1 1
+#define TMM2 2
+#define TMM3 3
+#define TMM4 4
+#define TMM5 5
+#define TMM6 6
+#define TMM7 7
+
+// parallel routines
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+inline T div_up(T x, T y) { return (x + y - 1) / y; }
+
+template <typename T>
+inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) {
+#if 0
+ // onednn partition pattern
+ T& n_my = n_end;
+ if (nth <= 1 || n == 0) {
+ n_start = 0;
+ n_my = n;
+ } else {
+ T n1 = div_up(n, nth);
+ T n2 = n1 - 1;
+ T T1 = n - n2 * nth;
+ n_my = ith < T1 ? n1 : n2;
+ n_start = ith <= T1 ? ith*n1 : T1 * n1 + (ith - T1) * n2;
+ }
+ n_end += n_start;
+#else
+ // pytorch aten partition pattern
+ T n_my = div_up(n, nth);
+ n_start = ith * n_my;
+ n_end = std::min(n_start + n_my, n);
+#endif
+}
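+// For illustration (editor's sketch of the aten-style partition above): calling
+// balance211(/*n*/10, /*nth*/4, /*ith*/1, n_start, n_end) computes chunks of
+// div_up(10, 4) = 3 per thread and yields n_start = 3, n_end = 6.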
+
+template <typename func_t>
+inline void parallel_for(int nth, int n, const func_t& f) {
+#if defined(_OPENMP)
+#pragma omp parallel num_threads(nth)
+{
+ //int nth = omp_get_num_threads();
+ int ith = omp_get_thread_num();
+ int tbegin, tend;
+ balance211(n, nth, ith, tbegin, tend);
+ f(tbegin, tend);
+}
+#else
+ f(0, n);
+
+ GGML_UNUSED(nth);
+#endif
+}
+
+template <typename func_t>
+inline void parallel_for_ggml(const ggml_compute_params * params, int n, const func_t & f) {
+ int tbegin, tend;
+ balance211(n, params->nth, params->ith, tbegin, tend);
+ f(tbegin, tend);
+ ggml_barrier(params->threadpool); // TODO: might not always be needed
+}
+
+// quantized types that have AMX support
+inline bool qtype_has_amx_kernels(const enum ggml_type type) {
+ // TODO: fix padding for vnni format
+ return (type == GGML_TYPE_Q4_0) ||
+ (type == GGML_TYPE_Q4_1) ||
+ (type == GGML_TYPE_Q8_0) ||
+ (type == GGML_TYPE_Q4_K) ||
+ (type == GGML_TYPE_Q5_K) ||
+ (type == GGML_TYPE_Q6_K) ||
+ (type == GGML_TYPE_IQ4_XS);
+}
+
+// ggml backend context
+struct ggml_backend_amx_context {
+ int n_threads = GGML_DEFAULT_N_THREADS;
+ std::unique_ptr<char[]> work_data;
+ size_t work_size = 0;
+};
--- /dev/null
+
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wpedantic"
+#pragma GCC diagnostic ignored "-Wunused-local-typedefs"
+#endif
+
+#include "amx.h"
+#include "mmq.h"
+#include "ggml-impl.h"
+#include "ggml-cpu-impl.h"
+#include "ggml-cpu-quants.h"
+#include "ggml-quants.h"
+#include <algorithm>
+#include <type_traits>
+
+#if defined(__gnu_linux__)
+#include <sys/syscall.h>
+#include <unistd.h>
+#endif
+
+#if defined(_OPENMP)
+#include <omp.h>
+#endif
+
+#if (defined(_WIN32) || defined(_WIN64))
+#define RESTRICT __restrict
+#else
+#define RESTRICT __restrict__
+#endif
+
+#if (defined(_WIN32) || defined(_WIN64))
+#define ALWAYS_INLINE __forceinline
+#elif __has_attribute(always_inline) || defined(__GNUC__)
+#define ALWAYS_INLINE __attribute__((__always_inline__)) inline
+#else
+#define ALWAYS_INLINE inline
+#endif
+
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+
+namespace {
+
+// Forced unrolling
+template <int n>
+struct Unroll {
+ template <typename Func, typename... Args>
+ ALWAYS_INLINE void operator()(const Func& f, Args... args) const {
+ Unroll<n - 1>{}(f, args...);
+ f(std::integral_constant<int, n - 1>{}, args...);
+ }
+};
+
+template <>
+struct Unroll<1> {
+ template <typename Func, typename... Args>
+ ALWAYS_INLINE void operator()(const Func& f, Args... args) const {
+ f(std::integral_constant<int, 0>{}, args...);
+ }
+};
+
+// type traits
+template <typename T> struct PackedTypes {};
+template <> struct PackedTypes<block_q4_0> { using type = int8_t; };
+template <> struct PackedTypes<block_q4_1> { using type = uint8_t; };
+template <> struct PackedTypes<block_q8_0> { using type = int8_t; };
+template <typename T> using packed_B_type = typename PackedTypes<T>::type;
+
+template <typename T>
+struct do_compensate : std::integral_constant<bool,
+ std::is_same<T, block_q8_0>::value> {};
+
+template <typename T>
+struct do_unpack : std::integral_constant<bool,
+ std::is_same<T, block_q4_0>::value ||
+ std::is_same<T, block_q4_1>::value> {};
+
+template <typename T>
+struct is_type_qkk : std::integral_constant<bool,
+ std::is_same<T, block_q4_K>::value ||
+ std::is_same<T, block_q5_K>::value ||
+ std::is_same<T, block_q6_K>::value ||
+ std::is_same<T, block_iq4_xs>::value> {};
+
+#define GGML_DISPATCH_FLOATING_TYPES(TYPE, ...) \
+ [&] { \
+ switch (TYPE) { \
+ case GGML_TYPE_F16: { \
+ using type = ggml_fp16_t; \
+ constexpr int blck_size = 16; \
+ return __VA_ARGS__(); \
+ } \
+ case GGML_TYPE_BF16: { \
+ using type = ggml_bf16_t; \
+ constexpr int blck_size = 32; \
+ return __VA_ARGS__(); \
+ } \
+ default: \
+ fprintf(stderr, "Unsupported floating data type\n"); \
+ } \
+ }()
+
+#define GGML_DISPATCH_QTYPES(QT, ...) \
+ [&] { \
+ switch (QT) { \
+ case GGML_TYPE_Q4_0: { \
+ using type = block_q4_0; \
+ using vec_dot_type = block_q8_0; \
+ constexpr int blck_size = QK4_0; \
+ return __VA_ARGS__(); \
+ } \
+ case GGML_TYPE_Q4_1: { \
+ using type = block_q4_1; \
+ using vec_dot_type = block_q8_1; \
+ constexpr int blck_size = QK4_1; \
+ return __VA_ARGS__(); \
+ } \
+ case GGML_TYPE_Q8_0: { \
+ using type = block_q8_0; \
+ using vec_dot_type = block_q8_0; \
+ constexpr int blck_size = QK8_0; \
+ return __VA_ARGS__(); \
+ } \
+ case GGML_TYPE_Q4_K: { \
+ using type = block_q4_K; \
+ using vec_dot_type = block_q8_K; \
+ constexpr int blck_size = QK_K; \
+ return __VA_ARGS__(); \
+ } \
+ case GGML_TYPE_Q5_K: { \
+ using type = block_q5_K; \
+ using vec_dot_type = block_q8_K; \
+ constexpr int blck_size = QK_K; \
+ return __VA_ARGS__(); \
+ } \
+ case GGML_TYPE_Q6_K: { \
+ using type = block_q6_K; \
+ using vec_dot_type = block_q8_K; \
+ constexpr int blck_size = QK_K; \
+ return __VA_ARGS__(); \
+ } \
+ case GGML_TYPE_IQ4_XS: { \
+ using type = block_iq4_xs; \
+ using vec_dot_type = block_q8_K; \
+ constexpr int blck_size = QK_K; \
+ return __VA_ARGS__(); \
+ } \
+ default: \
+            fprintf(stderr, "Unsupported quantized data type: %d\n", int(QT)); \
+ } \
+ }()
+
+#define GGML_DISPATCH_BOOL(BOOL_V, BOOL_NAME, ...) \
+ [&] { \
+ if (BOOL_V) { \
+ constexpr bool BOOL_NAME = true; \
+ return __VA_ARGS__(); \
+ } else { \
+ constexpr bool BOOL_NAME = false; \
+ return __VA_ARGS__(); \
+ } \
+ }()
+
+// define amx tile config data structure
+struct tile_config_t {
+ uint8_t palette_id = 0;
+ uint8_t start_row = 0;
+ uint8_t reserved_0[14] = {0};
+ uint16_t colsb[16] = {0};
+ uint8_t rows[16] = {0};
+};
+
+// Notes: amx tile config
+//
+// Typically, TMUL multiplies A and B tiles of size 16 x 64 containing INT8 values,
+// and accumulates the result into a 16 x 16 matrix C containing INT32 values.
+//
+// Since many GGUF quantized types have a `block_size` of 32, a 16-16-32 config is used
+// instead of the normally used 16-16-64 config.
+//
+// Block A: {16, 32}, dtype = int8_t
+// Block B: {16, 32}, dtype = uint8_t/int8_t
+// Block C: {16, 16}, dtype = int32_t
+//
+// Block B needs to be prepacked to vnni format before feeding into TMUL:
+//   packed_B: from {n, k} to {k/vnni_blk, n, vnni_blk}, viewed in 2d, we get {8, 64}
+//
+// Therefore, we get tileconfig:
+// A B C
+// rows 16 8 16
+//    colsb   32   64   64
+//
+// For tile distribution, a 2-2-4 pattern is followed: A uses TMM2-TMM3, B uses TMM0-TMM1,
+// and C uses TMM4-TMM7:
+// B TMM0 B TMM1
+// A TMM2 C TMM4 C TMM6
+// A TMM3 C TMM5 C TMM7
+//
+// Each `amx` kernel handles 4 blocks at a time: 2MB * 2NB; when m < 2 * BLOCK_M, unpacking A
+// will be needed.
+//
+// Here another commonly used pattern 1-3-3 is skipped, as it is mostly used when m <= 16;
+// and the single batch gemm (m=1) has a special fast path with `avx512-vnni`.
+//
+// ref: https://www.intel.com/content/www/us/en/developer/articles/code-sample/
+// advanced-matrix-extensions-intrinsics-functions.html
+//
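+// Illustrative index mapping for the repack above (with vnni_blk = 4, i.e. VNNI_BLK):
+// viewing packed_B as a {8, 16, 4} array, element B[n][k] of the original {16, 32}
+// block lands at packed_B[k / 4][n][k % 4], i.e. at flat offset
+// (k / 4) * 64 + n * 4 + (k % 4) in the {8, 64} packed view.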
+
+#define TC_CONFIG_TILE(i, r, cb) tc.rows[i] = r; tc.colsb[i] = cb
+void ggml_tile_config_init(void) {
+ static thread_local bool is_first_time = true;
+
+ if (!is_first_time) {
+ return;
+ }
+
+ static thread_local tile_config_t tc;
+ tile_config_t current_tc;
+    _tile_storeconfig(&current_tc);
+
+ // load only when config changes
+    if (tc.palette_id == 0 || (memcmp(&current_tc.colsb, &tc.colsb, sizeof(uint16_t) * 8) != 0 &&
+                               memcmp(&current_tc.rows, &tc.rows, sizeof(uint8_t) * 8) != 0)) {
+ tc.palette_id = 1;
+ tc.start_row = 0;
+ TC_CONFIG_TILE(TMM0, 8, 64);
+ TC_CONFIG_TILE(TMM1, 8, 64);
+ TC_CONFIG_TILE(TMM2, 16, 32);
+ TC_CONFIG_TILE(TMM3, 16, 32);
+ TC_CONFIG_TILE(TMM4, 16, 64);
+ TC_CONFIG_TILE(TMM5, 16, 64);
+ TC_CONFIG_TILE(TMM6, 16, 64);
+ TC_CONFIG_TILE(TMM7, 16, 64);
+ _tile_loadconfig(&tc);
+ }
+
+ is_first_time = false;
+}
+
+// we need an extra 16 * 4B (TILE_N * int32_t) for each NB/KB block for compensation.
+// See the notes `s8s8 igemm compensation in avx512-vnni` for detail.
+template <typename TB>
+int get_tile_size() {
+ int tile_size = TILE_N * sizeof(TB);
+ if (do_compensate<TB>::value) {
+ tile_size += TILE_N * sizeof(int32_t);
+ }
+ if (std::is_same<TB, block_q4_K>::value ||
+ std::is_same<TB, block_q5_K>::value) {
+ tile_size += TILE_N * 4;
+ }
+ if (std::is_same<TB, block_iq4_xs>::value) {
+ tile_size += TILE_N * 2;
+ }
+ return tile_size;
+}
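+
+// Worked example (illustrative; assumes sizeof(block_q8_0) == 34, i.e. one ggml_half d
+// plus 32 int8 qs): get_tile_size<block_q8_0>() = 16 * 34 + 16 * 4 = 608 bytes, since
+// q8_0 takes the do_compensate branch above.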
+
+template <typename TB, int BLOCK_K>
+int get_row_size(int K) {
+ int KB = K / BLOCK_K;
+ int row_size = KB * sizeof(TB);
+ if (do_compensate<TB>::value) {
+ row_size += KB * sizeof(int32_t);
+ }
+ if (std::is_same<TB, block_q4_K>::value ||
+ std::is_same<TB, block_q5_K>::value) {
+ row_size += KB * 4;
+ }
+ if (std::is_same<TB, block_iq4_xs>::value) {
+ row_size += KB * 2;
+ }
+ return row_size;
+}
+
+// vectorized dtype conversion
+inline float FP16_TO_FP32(ggml_half val) {
+ __m256i v = _mm256_setr_epi16(
+ val, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ __m512 o = _mm512_cvtph_ps(v);
+ return _mm512_cvtss_f32(o);
+}
+
+inline __m512 FP16_TO_FP32_VEC(ggml_half val) {
+ __m256i v = _mm256_set1_epi16(val);
+ return _mm512_cvtph_ps(v);
+}
+
+// horizontal reduce
+inline float _mm512_reduce_max_ps(const __m512 x) {
+ __m512 v = x;
+ __m512 v1 = _mm512_shuffle_f32x4(v, v, 0x4E);
+ v = _mm512_max_ps(v, v1);
+ v1 = _mm512_shuffle_f32x4(v, v, 0xB1);
+ v = _mm512_max_ps(v, v1);
+ v1 = _mm512_shuffle_ps(v, v, 0x4E);
+ v = _mm512_max_ps(v, v1);
+ v1 = _mm512_shuffle_ps(v, v, 0xB1);
+ v = _mm512_max_ps(v, v1);
+ return _mm512_cvtss_f32(v);
+}
+
+// transpose utils
+#define SHUFFLE_EPI32(a, b, mask) \
+ _mm256_castps_si256(_mm256_shuffle_ps(_mm256_castsi256_ps(a), _mm256_castsi256_ps(b), mask))
+inline void transpose_8x8_32bit(__m256i * v, __m256i * v1) {
+ // unpacking and 32-bit elements
+ v1[0] = _mm256_unpacklo_epi32(v[0], v[1]);
+ v1[1] = _mm256_unpackhi_epi32(v[0], v[1]);
+ v1[2] = _mm256_unpacklo_epi32(v[2], v[3]);
+ v1[3] = _mm256_unpackhi_epi32(v[2], v[3]);
+ v1[4] = _mm256_unpacklo_epi32(v[4], v[5]);
+ v1[5] = _mm256_unpackhi_epi32(v[4], v[5]);
+ v1[6] = _mm256_unpacklo_epi32(v[6], v[7]);
+ v1[7] = _mm256_unpackhi_epi32(v[6], v[7]);
+
+ // shuffling the 32-bit elements
+ v[0] = SHUFFLE_EPI32(v1[0], v1[2], 0x44);
+ v[1] = SHUFFLE_EPI32(v1[0], v1[2], 0xee);
+ v[2] = SHUFFLE_EPI32(v1[4], v1[6], 0x44);
+ v[3] = SHUFFLE_EPI32(v1[4], v1[6], 0xee);
+ v[4] = SHUFFLE_EPI32(v1[1], v1[3], 0x44);
+ v[5] = SHUFFLE_EPI32(v1[1], v1[3], 0xee);
+ v[6] = SHUFFLE_EPI32(v1[5], v1[7], 0x44);
+ v[7] = SHUFFLE_EPI32(v1[5], v1[7], 0xee);
+
+ // shuffling 128-bit elements
+ v1[0] = _mm256_permute2f128_si256(v[2], v[0], 0x02);
+ v1[1] = _mm256_permute2f128_si256(v[3], v[1], 0x02);
+ v1[2] = _mm256_permute2f128_si256(v[6], v[4], 0x02);
+ v1[3] = _mm256_permute2f128_si256(v[7], v[5], 0x02);
+ v1[4] = _mm256_permute2f128_si256(v[2], v[0], 0x13);
+ v1[5] = _mm256_permute2f128_si256(v[3], v[1], 0x13);
+ v1[6] = _mm256_permute2f128_si256(v[6], v[4], 0x13);
+ v1[7] = _mm256_permute2f128_si256(v[7], v[5], 0x13);
+}
+
+inline void transpose_16x4_32bit(__m512i * r, __m512i * d) {
+
+ static const __m512i index1 = _mm512_set_epi32(
+ 0x0f, 0x0b, 0x07, 0x03,
+ 0x0e, 0x0a, 0x06, 0x02,
+ 0x0d, 0x09, 0x05, 0x01,
+ 0x0c, 0x08, 0x04, 0x00);
+
+ d[0] = _mm512_permutexvar_epi32(index1, r[0]);
+ d[1] = _mm512_permutexvar_epi32(index1, r[1]);
+ d[2] = _mm512_permutexvar_epi32(index1, r[2]);
+ d[3] = _mm512_permutexvar_epi32(index1, r[3]);
+
+ r[0] = _mm512_shuffle_i32x4(d[0], d[1], 0x44);
+ r[1] = _mm512_shuffle_i32x4(d[0], d[1], 0xee);
+ r[2] = _mm512_shuffle_i32x4(d[2], d[3], 0x44);
+ r[3] = _mm512_shuffle_i32x4(d[2], d[3], 0xee);
+
+ d[0] = _mm512_shuffle_i32x4(r[0], r[2], 0x88);
+ d[1] = _mm512_shuffle_i32x4(r[0], r[2], 0xdd);
+ d[2] = _mm512_shuffle_i32x4(r[1], r[3], 0x88);
+ d[3] = _mm512_shuffle_i32x4(r[1], r[3], 0xdd);
+}
+
+inline void transpose_16x16_32bit(__m512i * v) {
+ __m512i v1[16];
+ v1[0] = _mm512_unpacklo_epi32(v[0], v[1]);
+ v1[1] = _mm512_unpackhi_epi32(v[0], v[1]);
+ v1[2] = _mm512_unpacklo_epi32(v[2], v[3]);
+ v1[3] = _mm512_unpackhi_epi32(v[2], v[3]);
+ v1[4] = _mm512_unpacklo_epi32(v[4], v[5]);
+ v1[5] = _mm512_unpackhi_epi32(v[4], v[5]);
+ v1[6] = _mm512_unpacklo_epi32(v[6], v[7]);
+ v1[7] = _mm512_unpackhi_epi32(v[6], v[7]);
+ v1[8] = _mm512_unpacklo_epi32(v[8], v[9]);
+ v1[9] = _mm512_unpackhi_epi32(v[8], v[9]);
+ v1[10] = _mm512_unpacklo_epi32(v[10], v[11]);
+ v1[11] = _mm512_unpackhi_epi32(v[10], v[11]);
+ v1[12] = _mm512_unpacklo_epi32(v[12], v[13]);
+ v1[13] = _mm512_unpackhi_epi32(v[12], v[13]);
+ v1[14] = _mm512_unpacklo_epi32(v[14], v[15]);
+ v1[15] = _mm512_unpackhi_epi32(v[14], v[15]);
+
+ v[0] = _mm512_unpacklo_epi64(v1[0], v1[2]);
+ v[1] = _mm512_unpackhi_epi64(v1[0], v1[2]);
+ v[2] = _mm512_unpacklo_epi64(v1[1], v1[3]);
+ v[3] = _mm512_unpackhi_epi64(v1[1], v1[3]);
+ v[4] = _mm512_unpacklo_epi64(v1[4], v1[6]);
+ v[5] = _mm512_unpackhi_epi64(v1[4], v1[6]);
+ v[6] = _mm512_unpacklo_epi64(v1[5], v1[7]);
+ v[7] = _mm512_unpackhi_epi64(v1[5], v1[7]);
+ v[8] = _mm512_unpacklo_epi64(v1[8], v1[10]);
+ v[9] = _mm512_unpackhi_epi64(v1[8], v1[10]);
+ v[10] = _mm512_unpacklo_epi64(v1[9], v1[11]);
+ v[11] = _mm512_unpackhi_epi64(v1[9], v1[11]);
+ v[12] = _mm512_unpacklo_epi64(v1[12], v1[14]);
+ v[13] = _mm512_unpackhi_epi64(v1[12], v1[14]);
+ v[14] = _mm512_unpacklo_epi64(v1[13], v1[15]);
+ v[15] = _mm512_unpackhi_epi64(v1[13], v1[15]);
+
+ v1[0] = _mm512_shuffle_i32x4(v[0], v[4], 0x88);
+ v1[1] = _mm512_shuffle_i32x4(v[1], v[5], 0x88);
+ v1[2] = _mm512_shuffle_i32x4(v[2], v[6], 0x88);
+ v1[3] = _mm512_shuffle_i32x4(v[3], v[7], 0x88);
+ v1[4] = _mm512_shuffle_i32x4(v[0], v[4], 0xdd);
+ v1[5] = _mm512_shuffle_i32x4(v[1], v[5], 0xdd);
+ v1[6] = _mm512_shuffle_i32x4(v[2], v[6], 0xdd);
+ v1[7] = _mm512_shuffle_i32x4(v[3], v[7], 0xdd);
+ v1[8] = _mm512_shuffle_i32x4(v[8], v[12], 0x88);
+ v1[9] = _mm512_shuffle_i32x4(v[9], v[13], 0x88);
+ v1[10] = _mm512_shuffle_i32x4(v[10], v[14], 0x88);
+ v1[11] = _mm512_shuffle_i32x4(v[11], v[15], 0x88);
+ v1[12] = _mm512_shuffle_i32x4(v[8], v[12], 0xdd);
+ v1[13] = _mm512_shuffle_i32x4(v[9], v[13], 0xdd);
+ v1[14] = _mm512_shuffle_i32x4(v[10], v[14], 0xdd);
+ v1[15] = _mm512_shuffle_i32x4(v[11], v[15], 0xdd);
+
+ v[0] = _mm512_shuffle_i32x4(v1[0], v1[8], 0x88);
+ v[1] = _mm512_shuffle_i32x4(v1[1], v1[9], 0x88);
+ v[2] = _mm512_shuffle_i32x4(v1[2], v1[10], 0x88);
+ v[3] = _mm512_shuffle_i32x4(v1[3], v1[11], 0x88);
+ v[4] = _mm512_shuffle_i32x4(v1[4], v1[12], 0x88);
+ v[5] = _mm512_shuffle_i32x4(v1[5], v1[13], 0x88);
+ v[6] = _mm512_shuffle_i32x4(v1[6], v1[14], 0x88);
+ v[7] = _mm512_shuffle_i32x4(v1[7], v1[15], 0x88);
+ v[8] = _mm512_shuffle_i32x4(v1[0], v1[8], 0xdd);
+ v[9] = _mm512_shuffle_i32x4(v1[1], v1[9], 0xdd);
+ v[10] = _mm512_shuffle_i32x4(v1[2], v1[10], 0xdd);
+ v[11] = _mm512_shuffle_i32x4(v1[3], v1[11], 0xdd);
+ v[12] = _mm512_shuffle_i32x4(v1[4], v1[12], 0xdd);
+ v[13] = _mm512_shuffle_i32x4(v1[5], v1[13], 0xdd);
+ v[14] = _mm512_shuffle_i32x4(v1[6], v1[14], 0xdd);
+ v[15] = _mm512_shuffle_i32x4(v1[7], v1[15], 0xdd);
+}
+
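+// Illustrative summary of the routine below: for each QK_K block, the scale works out
+// to amax / 127, qs[j] = round(x[j] * 127 / amax), and bsums holds the per-16-element
+// sums of the quants.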
+void quantize_row_q8_K_vnni(const float * RESTRICT x, void * RESTRICT vy, int64_t k) {
+ assert(k % QK_K == 0);
+ const int KB = k / QK_K;
+ constexpr int kVecs = QK_K / 16;
+
+ block_q8_K * y = reinterpret_cast<block_q8_K *>(vy);
+
+ // hold 16 float vecs from x
+ __m512 v[kVecs];
+
+ // hold the quants vecs
+ __m512i vq[kVecs / 4];
+
+ // hold the packed quants vecs
+ __m512i vq_packed[kVecs / 4];
+
+ const __m512 signBit = _mm512_set1_ps(-0.f);
+
+ for (int i = 0; i < KB; ++i) {
+ // Compute max(abs(e)) for the block
+ __m512 vamax = _mm512_set1_ps(0.f);
+ for (int j = 0; j < kVecs; ++j) {
+ v[j] = _mm512_loadu_ps(x); x += 16;
+ vamax = _mm512_max_ps(vamax, _mm512_andnot_ps(signBit, v[j]));
+ }
+ const float amax = _mm512_reduce_max_ps(vamax);
+
+ // Quantize these floats
+ const float iscale = 127.f / amax;
+ y[i].d = GGML_FP32_TO_FP16(1 / iscale);
+ const float id = ( amax != 0.0f ) ? iscale : 0.f;
+ const __m512 vscale = _mm512_set1_ps(id);
+
+ // Apply multiplier and round to nearest integer
+ for (int j = 0; j < kVecs; ++j) {
+ v[j] = _mm512_mul_ps(v[j], vscale);
+ v[j] = _mm512_roundscale_ps(v[j], (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
+ }
+
+ // Pack to epi8 vecs
+ for (int j = 0; j < kVecs / 4; ++j) {
+ __m128i q8_0 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 0]));
+ __m128i q8_1 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 1]));
+ __m128i q8_2 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 2]));
+ __m128i q8_3 = _mm512_cvtepi32_epi8(_mm512_cvtps_epi32(v[j * 4 + 3]));
+
+ __m256i q8_01 = _mm256_insertf128_si256(_mm256_castsi128_si256(q8_0), (q8_1), 1);
+ __m256i q8_23 = _mm256_insertf128_si256(_mm256_castsi128_si256(q8_2), (q8_3), 1);
+
+ vq[j] = _mm512_inserti32x8(_mm512_castsi256_si512(q8_01), q8_23, 1);
+ _mm512_storeu_si512((__m512i *)(y[i].qs + j * 64), vq[j]);
+ }
+
+ // Compute the bsums with vnni
+ transpose_16x4_32bit(vq, vq_packed);
+
+ const __m512i one = _mm512_set1_epi8(1);
+ __m512i sum = _mm512_setzero_si512();
+ for (int k = 0; k < 4; ++k) {
+ sum = _mm512_dpbusd_epi32(sum, one, vq_packed[k]);
+ }
+ _mm256_storeu_si256((__m256i *)(y[i].bsums), _mm512_cvtepi32_epi16(sum));
+ }
+}
+
+// quantize A from float to `vec_dot_type`
+template <typename T>
+inline void from_float(const float * x, char * vy, int64_t k);
+
+template <>
+inline void from_float<block_q8_0>(const float * x, char * vy, int64_t k) {
+ quantize_row_q8_0(x, (block_q8_0 *)vy, k);
+}
+
+template <>
+inline void from_float<block_q8_1>(const float * x, char * vy, int64_t k) {
+ quantize_row_q8_1(x, (block_q8_1 *)vy, k);
+}
+
+template <>
+inline void from_float<block_q8_K>(const float * x, char * vy, int64_t k) {
+#if 1
+ // TODO: this is reference impl!
+ quantize_row_q8_K_ref(x, (block_q8_K *)vy, k);
+#else
+ quantize_row_q8_K_vnni(x, vy, k);
+#endif
+}
+
+// load A from memory into the tile buffer when nrows cannot fill a whole tile
+void unpack_A(int8_t * RESTRICT tile, const block_q8_0 * RESTRICT A, int lda, int nr) {
+ assert(nr != TILE_M);
+ for (int m = 0; m < nr; ++m) {
+ const __m256i v = _mm256_loadu_si256((const __m256i *)(A[m * lda].qs));
+ _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), v);
+ }
+}
+
+void unpack_A(int8_t * RESTRICT tile, const block_q8_1 * RESTRICT A, int lda, int nr) {
+ assert(nr != TILE_M);
+ for (int m = 0; m < nr; ++m) {
+ const __m256i v = _mm256_loadu_si256((const __m256i *)(A[m * lda].qs));
+ _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), v);
+ }
+}
+
+template <typename TB>
+void unpack_A(int8_t * RESTRICT tile, const block_q8_K * RESTRICT A, int lda, int k, int nr) {
+ assert(nr <= TILE_M);
+ for (int m = 0; m < nr; ++m) {
+ const __m256i v = _mm256_loadu_si256((const __m256i *)(A[m * lda].qs + k * 32));
+ _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), v);
+ }
+}
+
+template <>
+void unpack_A<block_q6_K>(int8_t * RESTRICT tile, const block_q8_K * RESTRICT A, int lda, int k, int nr) {
+ assert(nr <= TILE_M);
+    // zero-pad k from 16 to 32 so that we don't have to re-configure amx
+ const __m128i zero = _mm_setzero_si128();
+ for (int m = 0; m < nr; ++m) {
+ const __m128i v = _mm_loadu_si128((const __m128i *)(A[m * lda].qs + k * 16));
+ const __m256i r = _mm256_insertf128_si256(_mm256_castsi128_si256(v), zero, 1);
+ _mm256_storeu_si256((__m256i *)(tile + m * TILE_K), r);
+ }
+}
+
+#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
+inline __m256i bytes_from_nibbles_32(const uint8_t * rsi) {
+ const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
+ const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
+ const __m256i lowMask = _mm256_set1_epi8(0xF);
+ return _mm256_and_si256(lowMask, bytes);
+}
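+
+// Illustrative behaviour of bytes_from_nibbles_32 above: for a 16-byte q4 input, the low
+// 16 bytes of the result hold the low nibbles and the high 16 bytes hold the high nibbles
+// of the same input bytes.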
+
+// used for block_q4_K
+inline __m512i bytes_from_nibbles_64(const uint8_t * rsi) {
+ const __m256i tmp = _mm256_loadu_si256((const __m256i *)rsi);
+ const __m256i lowMask = _mm256_set1_epi8(0xF);
+ const __m256i q4l = _mm256_and_si256(tmp, lowMask);
+ const __m256i q4h = _mm256_and_si256(_mm256_srli_epi16(tmp, 4), lowMask);
+ return _mm512_inserti32x8(_mm512_castsi256_si512(q4l), q4h, 1);
+}
+
+// used for block_q5_K
+inline __m512i bytes_from_nibbles_64(const uint8_t * qs, const uint8_t * qh, int k) {
+ const __m256i lowMask = _mm256_set1_epi8(0xF);
+ __m256i hmask = _mm256_set1_epi8(1);
+ hmask = _mm256_slli_epi16(hmask, k);
+
+ const __m256i q5bits = _mm256_loadu_si256((const __m256i *)qs);
+ const __m256i hbits = _mm256_loadu_si256((const __m256i *)qh);
+
+ const __m256i q5l_0 = _mm256_and_si256(q5bits, lowMask);
+ const __m256i q5h_0 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), k + 0), 4);
+ const __m256i q5_0 = _mm256_add_epi8(q5l_0, q5h_0);
+ hmask = _mm256_slli_epi16(hmask, 1);
+
+ const __m256i q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), lowMask);
+ const __m256i q5h_1 = _mm256_slli_epi16(_mm256_srli_epi16(_mm256_and_si256(hbits, hmask), k + 1), 4);
+ const __m256i q5_1 = _mm256_add_epi8(q5l_1, q5h_1);
+
+ return _mm512_inserti32x8(_mm512_castsi256_si512(q5_0), q5_1, 1);
+}
+
+// used for block_q6_K
+inline void bytes_from_nibbles_128(__m512i& r0, __m512i& r1, const uint8_t * qs, const uint8_t * qh) {
+ const __m256i m4 = _mm256_set1_epi8(0xF);
+ const __m256i m2 = _mm256_set1_epi8(0x3);
+
+ const __m256i q6bits1 = _mm256_loadu_si256((const __m256i *)qs);
+ const __m256i q6bits2 = _mm256_loadu_si256((const __m256i *)(qs + 32));
+ const __m256i q6bitsH = _mm256_loadu_si256((const __m256i *)qh);
+
+ const __m256i q6h_0 = _mm256_slli_epi16(_mm256_and_si256( q6bitsH, m2), 4);
+ const __m256i q6h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q6bitsH, 2), m2), 4);
+ const __m256i q6h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q6bitsH, 4), m2), 4);
+ const __m256i q6h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q6bitsH, 6), m2), 4);
+
+ const __m256i q6_0 = _mm256_or_si256(_mm256_and_si256(q6bits1, m4), q6h_0);
+ const __m256i q6_1 = _mm256_or_si256(_mm256_and_si256(q6bits2, m4), q6h_1);
+ const __m256i q6_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q6bits1, 4), m4), q6h_2);
+ const __m256i q6_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q6bits2, 4), m4), q6h_3);
+
+ r0 = _mm512_inserti32x8(_mm512_castsi256_si512(q6_0), q6_1, 1);
+ r1 = _mm512_inserti32x8(_mm512_castsi256_si512(q6_2), q6_3, 1);
+}
+
+inline __m512i packNibbles(__m512i r0, __m512i r1) {
+ return _mm512_or_si512(r0, _mm512_slli_epi16(r1, 4));
+}
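+
+// Illustrative (assuming the bytes hold 4-bit values): packNibbles merges two of them into
+// one byte, e.g. an r0 byte of 0x03 and an r1 byte of 0x0A pack to 0xA3 (low nibble from
+// r0, high nibble from r1).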
+
+template <typename TB>
+inline void pack_qs(void * RESTRICT packed_B, const TB * RESTRICT B, int KB) {
+ int8_t tmp[8 * 64];
+ __m256i v[8], v2[8];
+ for (int n = 0; n < 8; ++n) {
+ v[n] = bytes_from_nibbles_32(B[n * KB].qs);
+ }
+ transpose_8x8_32bit(v, v2);
+ for (int n = 0; n < 8; ++n) {
+ _mm256_storeu_si256((__m256i *)(tmp + n * 64), v2[n]);
+ }
+ for (int n = 0; n < 8; ++n) {
+ v[n] = bytes_from_nibbles_32(B[(n + 8) * KB].qs);
+ }
+ transpose_8x8_32bit(v, v2);
+ for (int n = 0; n < 8; ++n) {
+ _mm256_storeu_si256((__m256i *)(tmp + n * 64 + 32), v2[n]);
+ }
+
+ // pack again with 128 to fully utilize vector length
+ for (int n = 0; n < 8; n += 2) {
+ __m512i r0 = _mm512_loadu_si512((const __m512i *)(tmp + n * 64));
+ __m512i r1 = _mm512_loadu_si512((const __m512i *)(tmp + n * 64 + 64));
+ __m512i r1r0 = packNibbles(r0, r1);
+ _mm512_storeu_si512((__m512i *)((char *)packed_B + n * 32), r1r0);
+ }
+}
+
+template <>
+inline void pack_qs<block_q8_0>(void * RESTRICT packed_B, const block_q8_0 * RESTRICT B, int KB) {
+ __m256i v[8], v2[8];
+ for (int n = 0; n < 8; ++n) {
+ v[n] = _mm256_loadu_si256((const __m256i *)(B[n * KB].qs));
+ }
+ transpose_8x8_32bit(v, v2);
+ for (int n = 0; n < 8; ++n) {
+ _mm256_storeu_si256((__m256i *)((char *)packed_B + n * 64), v2[n]);
+ }
+ for (int n = 0; n < 8; ++n) {
+ v[n] = _mm256_loadu_si256((const __m256i *)(B[(n + 8) * KB].qs));
+ }
+ transpose_8x8_32bit(v, v2);
+ for (int n = 0; n < 8; ++n) {
+ _mm256_storeu_si256((__m256i *)((char *)packed_B + n * 64 + 32), v2[n]);
+ }
+}
+
+template <>
+inline void pack_qs<block_q4_K>(void * RESTRICT packed_B, const block_q4_K * RESTRICT B, int KB) {
+ __m512i v[16];
+ // QK_K 256 with 8 groups, handle 2 groups at a time
+ char * pb = (char *)packed_B;
+ for (int k = 0; k < QK_K / 64; ++k) {
+ // pack 2 groups { n, g, k} to {g, k/4, 4n}
+ // e.g. {16, 2, 32} to {2, 8, 64}
+ for (int n = 0; n < TILE_N; ++n) {
+ v[n] = bytes_from_nibbles_64(B[n * KB].qs + k * 32);
+ }
+
+ transpose_16x16_32bit(v);
+
+ // pack again with 128 to fully utilize vector length
+ for (int n = 0; n < TILE_N; n += 2) {
+ _mm512_storeu_si512((__m512i *)pb, packNibbles(v[n], v[n + 1]));
+ pb += 64;
+ }
+ }
+}
+
+template <>
+inline void pack_qs<block_q5_K>(void * RESTRICT packed_B, const block_q5_K * RESTRICT B, int KB) {
+ __m512i v[16];
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+ // QK_K 256 with 8 groups, handle 2 groups at a time
+ char * pb = (char *)packed_B;
+ char * ph = (char *)packed_B + (QK_K / 2) * TILE_N;
+ for (int k = 0; k < QK_K / 64; ++k) {
+ // pack 2 groups { n, g, k} to {g, k/4, 4n}
+ // e.g. {16, 2, 32} to {2, 8, 64}
+ for (int n = 0; n < TILE_N; ++n) {
+ v[n] = bytes_from_nibbles_64(B[n * KB].qs + k * 32, B[n * KB].qh, /* group */2 * k);
+ }
+
+ transpose_16x16_32bit(v);
+
+ // 1. pack lower 4bits with 2 groups
+ for (int n = 0; n < TILE_N; n += 2) {
+ // get lower 4 bits
+ const __m512i r0 = _mm512_and_si512(v[n], lowMask);
+ const __m512i r1 = _mm512_and_si512(v[n + 1], lowMask);
+ _mm512_storeu_si512((__m512i *)pb, packNibbles(r0, r1)); pb += 64;
+ }
+
+ // 2. pack higher 1bit with 2 groups
+ const __m512i hmask = _mm512_set1_epi8(0x10);
+ for (int g = 0; g < 2; ++g) {
+ __m512i hbits = _mm512_setzero_si512();
+ hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 0], hmask), 4));
+ hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 1], hmask), 3));
+ hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 2], hmask), 2));
+ hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 8 + 3], hmask), 1));
+ hbits = _mm512_add_epi8(hbits, _mm512_and_si512(v[g * 8 + 4], hmask) );
+ hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 8 + 5], hmask), 1));
+ hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 8 + 6], hmask), 2));
+ hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 8 + 7], hmask), 3));
+ _mm512_storeu_si512((__m512i *)ph, hbits); ph += 64;
+ }
+ }
+}
+
+template <>
+inline void pack_qs<block_q6_K>(void * RESTRICT packed_B, const block_q6_K * RESTRICT B, int KB) {
+ __m512i v[32];
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+ // QK_K 256 with 8 groups, handle 4 groups at a time
+ char * pb = (char *)packed_B;
+ char * ph = (char *)packed_B + (QK_K / 2) * TILE_N;
+ for (int k = 0; k < QK_K / 128; ++k) {
+ for (int n = 0; n < TILE_N; ++n) {
+ bytes_from_nibbles_128(v[n], v[n + 16], B[n * KB].ql + k * 64, B[n * KB].qh + k * 32);
+ }
+
+ // top half: group 0,1 or 4,5; bottom half: group 2,3 or 6,7
+ transpose_16x16_32bit(v);
+ transpose_16x16_32bit(v + 16);
+
+ // 1. pack lower 4bits with 4 groups
+ for (int n = 0; n < 32; n += 2) {
+ const __m512i r0 = _mm512_and_si512(v[n], lowMask);
+ const __m512i r1 = _mm512_and_si512(v[n + 1], lowMask);
+ _mm512_storeu_si512((__m512i *)pb, packNibbles(r0, r1)); pb += 64;
+ }
+
+ // 2. pack higher 2bit with 4 groups
+ const __m512i hmask = _mm512_set1_epi8(0x30);
+ for (int g = 0; g < 8; ++g) {
+ __m512i hbits = _mm512_setzero_si512();
+ hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 4 + 0], hmask), 4));
+ hbits = _mm512_add_epi8(hbits, _mm512_srli_epi16(_mm512_and_si512(v[g * 4 + 1], hmask), 2));
+ hbits = _mm512_add_epi8(hbits, _mm512_and_si512(v[g * 4 + 2], hmask) );
+ hbits = _mm512_add_epi8(hbits, _mm512_slli_epi16(_mm512_and_si512(v[g * 4 + 3], hmask), 2));
+ _mm512_storeu_si512((__m512i *)ph, hbits); ph += 64;
+ }
+ }
+}
+
+template <>
+inline void pack_qs<block_iq4_xs>(void * RESTRICT packed_B, const block_iq4_xs * RESTRICT B, int KB) {
+ __m512i v[16];
+ char * pb = (char *)packed_B;
+ for (int k = 0; k < QK_K / 64; ++k) {
+ for (int n = 0; n < TILE_N; ++n) {
+ __m256i r0 = bytes_from_nibbles_32(B[n * KB].qs + k * 32 + 0);
+ __m256i r1 = bytes_from_nibbles_32(B[n * KB].qs + k * 32 + 16);
+ v[n] = _mm512_inserti32x8(_mm512_castsi256_si512(r0), r1, 1);
+ }
+
+ transpose_16x16_32bit(v);
+
+ // pack again with 128 to fully utilize vector length
+ for (int n = 0; n < TILE_N; n += 2) {
+ _mm512_storeu_si512((__m512i *)pb, packNibbles(v[n], v[n + 1]));
+ pb += 64;
+ }
+ }
+}
+
+// pack B to vnni format in 4 bits or 8 bits
+void pack_B(void * RESTRICT packed_B, const block_q4_0 * RESTRICT B, int KB) {
+ pack_qs(packed_B, B, KB);
+ ggml_half * d0 = reinterpret_cast<ggml_half *>((char *)packed_B + TILE_N * TILE_K / 2);
+ for (int n = 0; n < TILE_N; ++n) {
+ d0[n] = B[n * KB].d;
+ }
+}
+
+void pack_B(void * RESTRICT packed_B, const block_q4_1 * RESTRICT B, int KB) {
+ pack_qs(packed_B, B, KB);
+ ggml_half * d0 = reinterpret_cast<ggml_half *>((char *)packed_B + TILE_N * TILE_K / 2);
+ ggml_half * m0 = d0 + TILE_N;
+ for (int n = 0; n < TILE_N; ++n) {
+ d0[n] = B[n * KB].d;
+ m0[n] = B[n * KB].m;
+ }
+}
+
+inline void s8s8_compensation(void * RESTRICT packed_B) {
+ // packed_B layout:
+    //  quants {TILE_N, TILE_K} int8_t
+ // d0 {TILE_N} ggml_half
+ // comp {TILE_N} int32_t
+ const int offset = TILE_N * TILE_K + TILE_N * sizeof(ggml_half);
+ __m512i vcomp = _mm512_setzero_si512();
+ const __m512i off = _mm512_set1_epi8(static_cast<char>(0x80));
+ for (int k = 0; k < 8; ++k) {
+ __m512i vb = _mm512_loadu_si512((const __m512i *)((const char *)packed_B + k * 64));
+ vcomp = _mm512_dpbusd_epi32(vcomp, off, vb);
+ }
+ _mm512_storeu_si512((__m512i *)((char *)(packed_B) + offset), vcomp);
+}
+
+void pack_B(void * RESTRICT packed_B, const block_q8_0 * RESTRICT B, int KB) {
+ pack_qs(packed_B, B, KB);
+ ggml_half * d0 = reinterpret_cast<ggml_half *>((char *)packed_B + TILE_N * TILE_K);
+ for (int n = 0; n < TILE_N; ++n) {
+ d0[n] = B[n * KB].d;
+ }
+ s8s8_compensation(packed_B);
+}
+
+// convert 8 * {min, scale} from int6 to int8
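+// on return utmp[0..1] hold the 8 scales and utmp[2..3] hold the 8 mins, one byte each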
+inline void unpack_mins_and_scales(const uint8_t * scales, uint32_t * utmp) {
+ const uint32_t kmask1 = 0x3f3f3f3f;
+ const uint32_t kmask2 = 0x0f0f0f0f;
+ const uint32_t kmask3 = 0x03030303;
+
+ memcpy(utmp, scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+}
+
+// packed_B layout:
+// quants {8, TILE_N, 16} uint8
+// scales {8, TILE_N} uint8
+// mins {8, TILE_N} uint8
+// d {TILE_N} ggml_half
+// dmin {TILE_N} ggml_half
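+//
+// note: mins are stored interleaved in pairs ({k/2, TILE_N, 2}) so that they can
+// later be consumed as int16 vnni lanes against the bsums of A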
+void pack_B(void * RESTRICT packed_B, const block_q4_K * RESTRICT B, int KB) {
+ pack_qs(packed_B, B, KB);
+
+ uint8_t * scales = reinterpret_cast<uint8_t *>((char *)packed_B + (QK_K / 2) * TILE_N);
+ uint8_t * mins = scales + 8 * TILE_N;
+ ggml_half * d = reinterpret_cast<ggml_half *>(mins + 8 * TILE_N);
+ ggml_half * dmin = d + TILE_N;
+
+ union {
+ uint32_t u32[4];
+ uint8_t u8[16];
+ } s;
+
+ for (int n = 0; n < TILE_N; ++n) {
+ unpack_mins_and_scales(B[n * KB].scales, s.u32);
+ for (int k = 0; k < 8; ++k) {
+ scales[k * TILE_N + n] = s.u8[k];
+ mins[(k >> 1) * TILE_N * 2 + n * 2 + (k & 0x1)] = s.u8[k + 8];
+ }
+ d[n] = B[n * KB].d;
+ dmin[n] = B[n * KB].dmin;
+ }
+}
+
+// packed_B layout:
+// quants {8, TILE_N, 16} uint8
+// qh {8, TILE_N, 4} uint8
+// scales {8, TILE_N} uint8
+// mins {8, TILE_N} uint8
+// d {TILE_N} ggml_half
+// dmin {TILE_N} ggml_half
+void pack_B(void * RESTRICT packed_B, const block_q5_K * RESTRICT B, int KB) {
+ pack_qs(packed_B, B, KB);
+
+ uint8_t * scales = reinterpret_cast<uint8_t *>((char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N);
+ uint8_t * mins = scales + 8 * TILE_N;
+ ggml_half * d = reinterpret_cast<ggml_half *>(mins + 8 * TILE_N);
+ ggml_half * dmin = d + TILE_N;
+
+ union {
+ uint32_t u32[4];
+ uint8_t u8[16];
+ } s;
+
+ for (int n = 0; n < TILE_N; ++n) {
+ unpack_mins_and_scales(B[n * KB].scales, s.u32);
+ for (int k = 0; k < 8; ++k) {
+ scales[k * TILE_N + n] = s.u8[k];
+ mins[(k >> 1) * TILE_N * 2 + n * 2 + (k & 0x1)] = s.u8[k + 8];
+ }
+ d[n] = B[n * KB].d;
+ dmin[n] = B[n * KB].dmin;
+ }
+}
+
+// packed_B layout:
+// quants {16, TILE_N, 8} uint8
+// qh {16, TILE_N, 4} uint8
+// scales {16, TILE_N} uint8
+// d {TILE_N} ggml_half
+void pack_B(void * RESTRICT packed_B, const block_q6_K * RESTRICT B, int KB) {
+ pack_qs(packed_B, B, KB);
+
+ uint8_t * scales = reinterpret_cast<uint8_t *>((char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N);
+ ggml_half * d = reinterpret_cast<ggml_half *>(scales + 16 * TILE_N);
+ for (int n = 0; n < TILE_N; ++n) {
+ const int8_t * ps = B[n * KB].scales;
+ for (int k = 0; k < 16; ++k) {
+ scales[k * TILE_N + n] = ps[k];
+ }
+ d[n] = B[n * KB].d;
+ }
+}
+
+// packed_B layout:
+// quants {8, TILE_N, 16} uint8
+// scales {8, TILE_N} int8
+// d {TILE_N} ggml_half
+void pack_B(void * RESTRICT packed_B, const block_iq4_xs * RESTRICT B, int KB) {
+ pack_qs(packed_B, B, KB);
+
+ int8_t * scales = reinterpret_cast<int8_t *>((char *)packed_B + (QK_K / 2) * TILE_N);
+ ggml_half * d = reinterpret_cast<ggml_half *>(scales + 8 * TILE_N);
+
+ // pack the scales
+ for (int n = 0; n < TILE_N; ++n) {
+ uint16_t sh = B[n * KB].scales_h;
+ for (int k = 0; k < 8; k += 2) {
+ const int16_t ls1 = ((B[n * KB].scales_l[k / 2] & 0xf) | ((sh << 4) & 0x30)) - 32;
+ const int16_t ls2 = ((B[n * KB].scales_l[k / 2] >> 4) | ((sh << 2) & 0x30)) - 32;
+ scales[(k + 0) * TILE_N + n] = ls1;
+ scales[(k + 1) * TILE_N + n] = ls2;
+ sh >>= 4;
+ }
+ d[n] = B[n * KB].d;
+ }
+}
+
+template<typename TB, typename packed_B_t = packed_B_type<TB>>
+void unpack_B(packed_B_t * RESTRICT tile, const void * RESTRICT packed_B) {
+ GGML_UNUSED(tile);
+ GGML_UNUSED(packed_B);
+}
+
+template <>
+void unpack_B<block_q4_0>(int8_t * RESTRICT tile, const void * RESTRICT packed_B) {
+ const __m512i off = _mm512_set1_epi8(8);
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+ for (int n = 0; n < 8; n += 2) {
+ __m512i bytes = _mm512_loadu_si512((const __m512i *)((const char *)packed_B + n * 32));
+ const __m512i r0 = _mm512_sub_epi8(_mm512_and_si512(bytes, lowMask), off);
+ const __m512i r1 = _mm512_sub_epi8(_mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask), off);
+ _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0);
+ _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1);
+ }
+}
+
+template <>
+void unpack_B<block_q4_1>(uint8_t * RESTRICT tile, const void * RESTRICT packed_B) {
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+ for (int n = 0; n < 8; n += 2) {
+ __m512i bytes = _mm512_loadu_si512((const __m512i *)((const char *)packed_B + n * 32));
+ const __m512i r0 = _mm512_and_si512(bytes, lowMask);
+ const __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
+ _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0);
+ _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1);
+ }
+}
+
+// packed_B_t for QKK is int8_t
+template <typename TB>
+void unpack_B(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) {
+ const int packed_B_group_size = QK_K / 2 * TILE_N / 8;
+ const char * packed_B_group = (const char *)packed_B + k * packed_B_group_size;
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+ for (int n = 0; n < 8; n += 2) {
+ __m512i bytes = _mm512_loadu_si512(packed_B_group + n * 32);
+ const __m512i r0 = _mm512_and_si512(bytes, lowMask);
+ const __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
+ _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0);
+ _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1);
+ }
+}
+
+template <>
+void unpack_B<block_q5_K>(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) {
+ // lower 4bits, stride 256 bytes
+ const int packed_l4_group_size = QK_K / 2 * TILE_N / 8;
+ const char * pb = (const char *)packed_B + k * packed_l4_group_size;
+
+ // higher 1bit, stride 64 bytes
+ const int packed_h1_group_size = QK_K / 8 * TILE_N / 8;
+ const char * ph = (const char *)packed_B + (QK_K / 2) * TILE_N + k * packed_h1_group_size;
+ const __m512i hbits = _mm512_loadu_si512(ph);
+
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+ __m512i hmask0 = _mm512_set1_epi8(0x1);
+ __m512i hmask1 = _mm512_set1_epi8(0x2);
+
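+    // each quant is reconstructed as low nibble + (high bit << 4), giving values in [0, 32)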
+ for (int n = 0; n < 8; n += 2) {
+ __m512i bytes = _mm512_loadu_si512(pb + n * 32);
+ __m512i r0 = _mm512_and_si512(bytes, lowMask);
+ __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
+ __m512i h0 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask0), n), 4);
+ __m512i h1 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask1), n + 1), 4);
+
+ hmask0 = _mm512_slli_epi16(hmask0, 2);
+ hmask1 = _mm512_slli_epi16(hmask1, 2);
+ r0 = _mm512_add_epi8(r0, h0);
+ r1 = _mm512_add_epi8(r1, h1);
+ _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0);
+ _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1);
+ }
+}
+
+template <>
+void unpack_B<block_q6_K>(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) {
+ // lower 4bits, stride 128 bytes
+ const int packed_l4_group_size = QK_K / 2 * TILE_N / 16;
+ const char * pb = (const char *)packed_B + k * packed_l4_group_size;
+
+ // higher 2bits, stride 64 bytes
+ const int packed_h2_group_size = QK_K / 4 * TILE_N / 16;
+ const char * ph = (const char *)packed_B + (QK_K / 2) * TILE_N + k * packed_h2_group_size;
+ const __m512i hbits = _mm512_loadu_si512(ph);
+
+ const __m512i off = _mm512_set1_epi8(32);
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+ __m512i hmask0 = _mm512_set1_epi8(0x3); // 0011
+ __m512i hmask1 = _mm512_set1_epi8(0xC); // 1100
+
+    // note: skip zero padding from row 4 to row 7, as this was already done in `unpack_A`
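+    // each quant is reconstructed as (low nibble | high 2 bits << 4) - 32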
+ __m512i bytes = _mm512_loadu_si512(pb);
+ __m512i r0 = _mm512_and_si512(bytes, lowMask);
+ __m512i r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
+ __m512i h0 = _mm512_slli_epi16(_mm512_and_si512(hbits, hmask0), 4);
+ __m512i h1 = _mm512_slli_epi16(_mm512_and_si512(hbits, hmask1), 2);
+ _mm512_storeu_si512((__m512i *)(tile + 0), _mm512_sub_epi8(_mm512_add_epi8(r0, h0), off));
+ _mm512_storeu_si512((__m512i *)(tile + 64), _mm512_sub_epi8(_mm512_add_epi8(r1, h1), off));
+
+ hmask0 = _mm512_slli_epi16(hmask0, 4);
+ hmask1 = _mm512_slli_epi16(hmask1, 4);
+
+ bytes = _mm512_loadu_si512(pb + 64);
+ r0 = _mm512_and_si512(bytes, lowMask);
+ r1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
+ h0 = _mm512_and_si512(hbits, hmask0);
+ h1 = _mm512_srli_epi16(_mm512_and_si512(hbits, hmask1), 2);
+ _mm512_storeu_si512((__m512i *)(tile + 128), _mm512_sub_epi8(_mm512_add_epi8(r0, h0), off));
+ _mm512_storeu_si512((__m512i *)(tile + 192), _mm512_sub_epi8(_mm512_add_epi8(r1, h1), off));
+}
+
+template <>
+void unpack_B<block_iq4_xs>(int8_t * RESTRICT tile, const void * RESTRICT packed_B, int k) {
+ static const __m512i values128 = _mm512_set_epi8(
+ 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
+ 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
+ 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
+ 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127
+ );
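+    // non-linear IQ4 codebook replicated in every 128-bit lane; each packed 4-bit
+    // index selects its int8 value via _mm512_shuffle_epi8 below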
+
+ const int packed_B_group_size = QK_K / 2 * TILE_N / 8;
+ const char * pb = (const char *)packed_B + k * packed_B_group_size;
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+
+ for (int n = 0; n < 8; n += 2) {
+ __m512i bytes = _mm512_loadu_si512(pb + n * 32);
+ const __m512i r0 = _mm512_shuffle_epi8(values128, _mm512_and_si512(bytes, lowMask));
+ const __m512i r1 = _mm512_shuffle_epi8(values128, _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask));
+ _mm512_storeu_si512((__m512i *)(tile + n * 64 + 0), r0);
+ _mm512_storeu_si512((__m512i *)(tile + n * 64 + 64), r1);
+ }
+}
+
+template <typename TA, typename TB, bool is_acc>
+struct acc_C {};
+
+template <bool is_acc>
+struct acc_C<block_q8_0, block_q4_0, is_acc> {
+ static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_0 * A, int lda, const void * packed_B, int nr) {
+ const int offset = TILE_N * TILE_K / 2;
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset)));
+
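+        // C[m, n] (+)= d1[m] * d0[n] * tile[m, n]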
+ for (int m = 0; m < nr; ++m) {
+ const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d));
+ const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
+
+ __m512 vsum;
+ if (is_acc) {
+ vsum = _mm512_loadu_ps(C + m * ldc);
+ } else {
+ vsum = _mm512_set1_ps(0.f);
+ }
+ vsum = _mm512_fmadd_ps(vtile, _mm512_mul_ps(vd0, vd1), vsum);
+ _mm512_storeu_ps(C + m * ldc, vsum);
+ }
+ }
+};
+
+template <bool is_acc>
+struct acc_C<block_q8_1, block_q4_1, is_acc> {
+ static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_1 * A, int lda, const void * packed_B, int nr) {
+ const int offset = TILE_N * TILE_K / 2;
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset)));
+ const __m512 vm0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset + TILE_N * sizeof(ggml_half))));
+
+ for (int m = 0; m < nr; ++m) {
+ const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d));
+ const __m512 vs1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].s));
+ const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
+
+ __m512 vsum;
+ if (is_acc) {
+ vsum = _mm512_loadu_ps(C + m * ldc);
+ } else {
+ vsum = _mm512_set1_ps(0.f);
+ }
+ vsum = _mm512_fmadd_ps(vtile, _mm512_mul_ps(vd0, vd1), vsum);
+ vsum = _mm512_fmadd_ps(vm0, vs1, vsum);
+ _mm512_storeu_ps(C + m * ldc, vsum);
+ }
+ }
+};
+
+template <bool is_acc>
+struct acc_C<block_q8_0, block_q8_0, is_acc> {
+ static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_0 * A, int lda, const void * packed_B, int nr) {
+ const int offset = TILE_N * TILE_K;
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)((const char *)packed_B + offset)));
+
+ for (int m = 0; m < nr; ++m) {
+ const __m512 vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[m * lda].d));
+ const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
+
+ __m512 vsum;
+ if (is_acc) {
+ vsum = _mm512_loadu_ps(C + m * ldc);
+ } else {
+ vsum = _mm512_set1_ps(0.f);
+ }
+ vsum = _mm512_fmadd_ps(vtile, _mm512_mul_ps(vd0, vd1), vsum);
+ _mm512_storeu_ps(C + m * ldc, vsum);
+ }
+ }
+};
+
+template <bool is_acc>
+struct acc_C<block_q8_K, block_q4_K, is_acc> {
+ static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_K * A, int lda, const void * packed_B, int nr) {
+ const uint8_t * scales = reinterpret_cast<const uint8_t *>((const char *)packed_B + (QK_K / 2) * TILE_N);
+ const uint8_t * mins = scales + 8 * TILE_N;
+ const ggml_half * d0 = reinterpret_cast<const ggml_half *>(mins + 8 * TILE_N);
+ const ggml_half * dmin = d0 + TILE_N;
+
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0));
+ const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)dmin));
+
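+        // C[m, n] (+)= d1[m] * d0[n] * tile[m, n] - d1[m] * dmin[n] * dot(A[m] bsums, mins[:, n])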
+ for (int m = 0; m < nr; ++m) {
+ const float d1 = A[m * lda].d;
+ const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0);
+ const __m512 vdm = _mm512_mul_ps(_mm512_set1_ps(-d1), vdmin);
+ const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
+
+ __m512 vsum;
+ if (is_acc) {
+ vsum = _mm512_loadu_ps(C + m * ldc);
+ } else {
+ vsum = _mm512_set1_ps(0.f);
+ }
+
+ const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[m * lda].bsums);
+ const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
+
+ __m512i acc_m = _mm512_setzero_si512();
+ for (int k = 0; k < 4; ++k) {
+ __m512i vmask = _mm512_set1_epi32(k);
+ __m512i va = _mm512_permutexvar_epi32(vmask, _mm512_castsi128_si512(q8s));
+ __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(mins + k * 32)));
+ acc_m = _mm512_dpwssds_epi32(acc_m, va, vb);
+ }
+
+ vsum = _mm512_fmadd_ps(vtile, vd, vsum);
+ vsum = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc_m), vdm, vsum);
+ _mm512_storeu_ps(C + m * ldc, vsum);
+ }
+ }
+};
+
+template <bool is_acc>
+struct acc_C<block_q8_K, block_q5_K, is_acc> {
+ static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_K * A, int lda, const void * packed_B, int nr) {
+ const uint8_t * scales = reinterpret_cast<const uint8_t *>((const char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N);
+ const uint8_t * mins = scales + 8 * TILE_N;
+ const ggml_half * d0 = reinterpret_cast<const ggml_half *>(mins + 8 * TILE_N);
+ const ggml_half * dmin = d0 + TILE_N;
+
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0));
+ const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)dmin));
+
+ for (int m = 0; m < nr; ++m) {
+ const float d1 = A[m * lda].d;
+ const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0);
+ const __m512 vdm = _mm512_mul_ps(_mm512_set1_ps(-d1), vdmin);
+ const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
+
+ __m512 vsum;
+ if (is_acc) {
+ vsum = _mm512_loadu_ps(C + m * ldc);
+ } else {
+ vsum = _mm512_set1_ps(0.f);
+ }
+
+ const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[m * lda].bsums);
+ const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
+
+ __m512i acc_m = _mm512_setzero_si512();
+ for (int k = 0; k < 4; ++k) {
+ __m512i vmask = _mm512_set1_epi32(k);
+ __m512i va = _mm512_permutexvar_epi32(vmask, _mm512_castsi128_si512(q8s));
+ __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(mins + k * 32)));
+ acc_m = _mm512_dpwssds_epi32(acc_m, va, vb);
+ }
+
+ vsum = _mm512_fmadd_ps(vtile, vd, vsum);
+ vsum = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc_m), vdm, vsum);
+ _mm512_storeu_ps(C + m * ldc, vsum);
+ }
+ }
+};
+
+template <bool is_acc>
+struct acc_C<block_q8_K, block_q6_K, is_acc> {
+ static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_K * A, int lda, const void * packed_B, int nr) {
+ const uint8_t * scales = reinterpret_cast<const uint8_t *>((const char *)packed_B + (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N);
+ const ggml_half * d0 = reinterpret_cast<const ggml_half *>(scales + 16 * TILE_N);
+
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0));
+
+ for (int m = 0; m < nr; ++m) {
+ const float d1 = A[m * lda].d;
+ const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0);
+ const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
+
+ __m512 vsum;
+ if (is_acc) {
+ vsum = _mm512_loadu_ps(C + m * ldc);
+ } else {
+ vsum = _mm512_set1_ps(0.f);
+ }
+
+ vsum = _mm512_fmadd_ps(vtile, vd, vsum);
+ _mm512_storeu_ps(C + m * ldc, vsum);
+ }
+ }
+};
+
+template <bool is_acc>
+struct acc_C<block_q8_K, block_iq4_xs, is_acc> {
+ static void apply(float * RESTRICT C, int ldc, const int32_t * RESTRICT tile, const block_q8_K * A, int lda, const void * packed_B, int nr) {
+ const int8_t * scales = reinterpret_cast<const int8_t *>((const char *)packed_B + (QK_K / 2) * TILE_N);
+ const ggml_half * d0 = reinterpret_cast<const ggml_half *>(scales + 8 * TILE_N);
+
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)d0));
+
+ for (int m = 0; m < nr; ++m) {
+ const float d1 = A[m * lda].d;
+ const __m512 vd = _mm512_mul_ps(_mm512_set1_ps(d1), vd0);
+ const __m512 vtile = _mm512_cvtepi32_ps(_mm512_loadu_si512(tile + m * TILE_N));
+
+ __m512 vsum;
+ if (is_acc) {
+ vsum = _mm512_loadu_ps(C + m * ldc);
+ } else {
+ vsum = _mm512_set1_ps(0.f);
+ }
+
+ vsum = _mm512_fmadd_ps(vtile, vd, vsum);
+ _mm512_storeu_ps(C + m * ldc, vsum);
+ }
+ }
+};
+
+template <typename TB> constexpr int get_quants_size();
+template <> constexpr int get_quants_size<block_q4_K>() { return (QK_K / 2) * TILE_N; }
+template <> constexpr int get_quants_size<block_q5_K>() { return (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N; }
+template <> constexpr int get_quants_size<block_q6_K>() { return (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N; }
+template <> constexpr int get_quants_size<block_iq4_xs>() { return (QK_K / 2) * TILE_N; }
+
+// used for QKK format
+template <typename TB, bool is_acc,
+ typename std::enable_if<is_type_qkk<TB>::value, int>::type = 0>
+inline void scale_C(const int32_t * RESTRICT tile, int32_t * RESTRICT sumi, const void * packed_B, int k, int nr) {
+ const uint8_t * scales = reinterpret_cast<const uint8_t *>((const char *)packed_B + get_quants_size<TB>());
+ const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(scales + k * TILE_N)));
+
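+    // sumi[m, n] (+)= scales[k, n] * tile[m, n]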
+ for (int m = 0; m < nr; ++m) {
+ __m512i vsumi;
+ if (is_acc) {
+ vsumi = _mm512_loadu_si512(sumi + m * TILE_N);
+ } else {
+ vsumi = _mm512_setzero_si512();
+ }
+ __m512i vtile = _mm512_loadu_si512(tile + m * TILE_N);
+ vsumi = _mm512_add_epi32(vsumi, _mm512_mullo_epi32(vtile, vscale));
+ _mm512_storeu_si512((__m512i *)(sumi + m * TILE_N), vsumi);
+ }
+}
+
+template <typename TA, typename TB, typename TC, int BLOCK_M, int BLOCK_N, int BLOCK_K>
+struct tinygemm_kernel_avx {
+ static void apply(int K, const TA * RESTRICT A, const TB * RESTRICT B, TC * RESTRICT C, int ldc) {
+ GGML_UNUSED(K);
+ GGML_UNUSED(A);
+ GGML_UNUSED(B);
+ GGML_UNUSED(C);
+ GGML_UNUSED(ldc);
+ }
+};
+
+template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
+struct tinygemm_kernel_avx<float, ggml_fp16_t, float, BLOCK_M, BLOCK_N, BLOCK_K> {
+ static void apply(int K, const float * RESTRICT A, const ggml_fp16_t * RESTRICT B, float * RESTRICT C, int ldc) {
+ constexpr int ROWS = BLOCK_M;
+ constexpr int COLS = BLOCK_N;
+ assert(BLOCK_K == 16);
+
+ __m512 va;
+ __m512 vb[COLS];
+ __m512 vc[ROWS * COLS];
+
+ auto loadc = [&](int idx) {
+ vc[idx] = _mm512_setzero_ps();
+ };
+ Unroll<ROWS * COLS>{}(loadc);
+
+ auto compute = [&](int idx, int k) {
+            // TODO: use `constexpr` here to get rid of integer div
+ // when upgraded to C++17
+ const int row = idx / COLS;
+ const int col = idx % COLS;
+
+ if (col == 0) {
+ va = _mm512_loadu_ps(A + row * K + k);
+ }
+ if (row == 0) {
+ vb[col] = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(B + col * K + k)));
+ }
+ vc[idx] = _mm512_fmadd_ps(va, vb[col], vc[idx]);
+ };
+
+ for (int k = 0; k < K; k += 16) {
+ Unroll<ROWS * COLS>{}(compute, k);
+ }
+
+ auto storec = [&](int idx) {
+ const int row = idx / COLS;
+ const int col = idx % COLS;
+ C[row * ldc + col] = _mm512_reduce_add_ps(vc[idx]);
+ };
+ Unroll<ROWS * COLS>{}(storec);
+ }
+};
+
+#define LAUNCH_TINYGEMM_KERNEL_AVX(MB_SIZE, NB_SIZE) \
+ tinygemm_kernel_avx<float, type, float, MB_SIZE, NB_SIZE, blck_size>::apply( \
+ K, (const float *)src1->data + mb_start * K, \
+ (const type *)src0->data + nb_start * K, \
+ (float *)dst->data + mb_start * ldc + nb_start, ldc);
+
+
+// re-organize in the format {NB, KB, TILE_SIZE}:
+#define PACKED_INDEX(n, k, KB, tile_size) (n * KB + k) * tile_size
+
+template<typename TB, int BLOCK_K>
+void convert_B_packed_format(void * RESTRICT packed_B, const TB * RESTRICT B, int N, int K, int n_threads) {
+ const int NB = N / TILE_N;
+ const int KB = K / BLOCK_K;
+ const int TILE_SIZE = get_tile_size<TB>();
+
+ // parallel on NB should be enough
+ parallel_for(n_threads, NB, [&](int begin, int end) {
+ for (int n = begin; n < end; ++n) {
+ for (int k = 0; k < KB; ++k) {
+ int n0 = n * TILE_N;
+ pack_B((char *)packed_B + PACKED_INDEX(n, k, KB, TILE_SIZE), &B[n0 * KB + k], KB);
+ }
+ }
+ });
+}
+
+template <typename TA, typename TB, typename TC, int BLOCK_M, int BLOCK_N, int BLOCK_K>
+struct tinygemm_kernel_vnni {};
+
+template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
+struct tinygemm_kernel_vnni<block_q8_0, block_q4_0, float, BLOCK_M, BLOCK_N, BLOCK_K> {
+ static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
+
+ constexpr int COLS = BLOCK_N / 16;
+ const int TILE_SIZE = TILE_N * sizeof(block_q4_0);
+
+ const block_q8_0 * RESTRICT A = static_cast<const block_q8_0 *>(_A);
+ const char * RESTRICT B = static_cast<const char *>(_B);
+
+ __m512i va[8];
+ __m512 vc[COLS];
+ __m512 vd1;
+
+ // sum of offsets, shared across COLS
+ //
+ // avx512-vnni does not have `_mm512_dpbssd_epi32`,
+        // need to transform ss to us:
+        // a * (b - 8) is equivalent to b * a - 8 * a
+ // s u u u s u s
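+        // e.g. a = -3 (s8), b = 5 (u4 nibble): -3 * (5 - 8) = 9 = 5 * (-3) - 8 * (-3)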
+ //
+ __m512i vcomp;
+
+ const __m512i off = _mm512_set1_epi8(8);
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+
+ auto loadc = [&](int col) {
+ vc[col] = _mm512_setzero_ps();
+ };
+ Unroll<COLS>{}(loadc);
+
+ auto compute = [&](int col, int i) {
+ // load a and compute compensation
+ if (col == 0) {
+ const int32_t * a_ptr = reinterpret_cast<const int32_t *>(A[0 * KB + i].qs);
+ vcomp = _mm512_setzero_si512();
+ for (int k = 0; k < 8; ++k) {
+ va[k] = _mm512_set1_epi32(a_ptr[k]);
+ vcomp = _mm512_dpbusd_epi32(vcomp, off, va[k]);
+ }
+ vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].d));
+ }
+
+ // load b
+ __m512i vsum = _mm512_setzero_si512();
+ const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
+ for (int k = 0; k < 8; k += 2) {
+ __m512i bytes = _mm512_loadu_si512((const __m512i *)(b_ptr + k * 32));
+ __m512i vb0 = _mm512_and_si512(bytes, lowMask);
+ vsum = _mm512_dpbusd_epi32(vsum, vb0, va[k + 0]);
+ __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
+ vsum = _mm512_dpbusd_epi32(vsum, vb1, va[k + 1]);
+ }
+ const int offset = TILE_N * TILE_K / 2;
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset)));
+ vsum = _mm512_sub_epi32(vsum, vcomp);
+
+ vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(vsum), _mm512_mul_ps(vd0, vd1), vc[col]);
+ };
+
+ for (int i = 0; i < KB; ++i) {
+ Unroll<COLS>{}(compute, i);
+ }
+
+ //store to C
+ auto storec = [&](int col) {
+ _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
+ };
+ Unroll<COLS>{}(storec);
+ }
+};
+
+template <int BLOCK_N, int BLOCK_K>
+struct tinygemm_kernel_vnni<block_q8_1, block_q4_1, float, 1, BLOCK_N, BLOCK_K> {
+ static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
+
+ constexpr int COLS = BLOCK_N / 16;
+ const int TILE_SIZE = TILE_N * sizeof(block_q4_1);
+
+ const block_q8_1 * RESTRICT A = static_cast<const block_q8_1 *>(_A);
+ const char * RESTRICT B = static_cast<const char *>(_B);
+
+ __m512i va[8];
+ __m512i vb[8];
+ __m512 vc[COLS];
+ __m512 vd1, vs1;
+
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+
+ auto loadc = [&](int col) {
+ vc[col] = _mm512_setzero_ps();
+ };
+ Unroll<COLS>{}(loadc);
+
+ auto compute = [&](int col, int i) {
+ // load a
+ if (col == 0) {
+ const int32_t * a_ptr = reinterpret_cast<const int32_t *>(A[0 * KB + i].qs);
+ for (int k = 0; k < 8; ++k) {
+ va[k] = _mm512_set1_epi32(a_ptr[k]);
+ }
+ vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].d));
+ vs1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].s));
+ }
+
+ // load b
+ const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
+ for (int k = 0; k < 8; k += 2) {
+ __m512i bytes = _mm512_loadu_si512((const __m512i *)(b_ptr + k * 32));
+ vb[k + 0] = _mm512_and_si512(bytes, lowMask);
+ vb[k + 1] = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
+ }
+ const int offset = TILE_N * TILE_K / 2;
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset)));
+ const __m512 vm0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset + TILE_N * sizeof(ggml_half))));
+
+ __m512i vsum = _mm512_setzero_si512();
+ for (int k = 0; k < 8; ++k) {
+ vsum = _mm512_dpbusd_epi32(vsum, vb[k], va[k]);
+ }
+
+ vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(vsum), _mm512_mul_ps(vd0, vd1), vc[col]);
+ vc[col] = _mm512_fmadd_ps(vm0, vs1, vc[col]);
+ };
+
+ for (int i = 0; i < KB; ++i) {
+ Unroll<COLS>{}(compute, i);
+ }
+
+ //store to C
+ auto storec = [&](int col) {
+ _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
+ };
+ Unroll<COLS>{}(storec);
+ }
+};
+
+template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
+struct tinygemm_kernel_vnni<block_q8_0, block_q8_0, float, BLOCK_M, BLOCK_N, BLOCK_K> {
+ static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
+
+ constexpr int COLS = BLOCK_N / 16;
+ const int TILE_SIZE = TILE_N * sizeof(block_q8_0) + TILE_N * sizeof(int32_t);
+
+ const block_q8_0 * RESTRICT A = static_cast<const block_q8_0 *>(_A);
+ const char * RESTRICT B = static_cast<const char *>(_B);
+
+ __m512i va[8];
+ __m512i vb[8];
+ __m512 vc[COLS];
+ __m512 vd1;
+
+ // Notes: s8s8 igemm compensation in avx512-vnni
+        // change s8s8 to u8s8 with compensation
+ // a * b = (a + 128) * b - 128 * b
+ // s s u s u s
+ //
+ // (128 * b is pre-computed when packing B to vnni formats)
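+        // e.g. a = -5, b = 7: -5 * 7 = -35 = (-5 + 128) * 7 - 128 * 7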
+ //
+ const __m512i off = _mm512_set1_epi8(static_cast<char>(0x80));
+
+ auto loadc = [&](int col) {
+ vc[col] = _mm512_setzero_ps();
+ };
+ Unroll<COLS>{}(loadc);
+
+ auto compute = [&](int col, int i) {
+ // load a and add offset 128
+ if (col == 0) {
+ const int32_t * a_ptr = reinterpret_cast<const int32_t *>(A[0 * KB + i].qs);
+ for (int k = 0; k < 8; ++k) {
+ va[k] = _mm512_set1_epi32(a_ptr[k]);
+ va[k] = _mm512_add_epi8(va[k], off);
+ }
+ vd1 = _mm512_set1_ps(GGML_FP16_TO_FP32(A[0 * KB + i].d));
+ }
+
+ // load b
+ const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
+ for (int k = 0; k < 8; ++k) {
+ vb[k] = _mm512_loadu_si512((const __m512i *)(b_ptr + k * 64));
+ }
+ const int offset = TILE_N * TILE_K;
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset)));
+ const int offset2 = TILE_N * TILE_K + TILE_N * sizeof(ggml_half);
+ const __m512i vcomp = _mm512_loadu_si512((const __m512i *)(b_ptr + offset2));
+
+ __m512i vsum = _mm512_setzero_si512();
+ for (int k = 0; k < 8; ++k) {
+ vsum = _mm512_dpbusd_epi32(vsum, va[k], vb[k]);
+ }
+ vsum = _mm512_sub_epi32(vsum, vcomp);
+
+ vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(vsum), _mm512_mul_ps(vd0, vd1), vc[col]);
+ };
+
+ for (int i = 0; i < KB; ++i) {
+ Unroll<COLS>{}(compute, i);
+ }
+
+ //store to C
+ auto storec = [&](int col) {
+ _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
+ };
+ Unroll<COLS>{}(storec);
+ }
+};
+
+template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
+struct tinygemm_kernel_vnni<block_q8_K, block_q4_K, float, BLOCK_M, BLOCK_N, BLOCK_K> {
+ static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
+
+ constexpr int COLS = BLOCK_N / 16;
+ const int TILE_SIZE = TILE_N * sizeof(block_q4_K) + TILE_N * 4;
+
+ const block_q8_K * RESTRICT A = static_cast<const block_q8_K *>(_A);
+ const char * RESTRICT B = static_cast<const char *>(_B);
+
+ // a.qs: 8 groups, 32 bytes each group (m256i)
+ __m512i va[8];
+ // a.bsum: 8 groups, 2 bytes each group (m128i)
+ __m512i va_bsum;
+ __m512 vc[COLS];
+ __m512 vd1;
+
+ // packed_B:
+ const int offset_scales = (QK_K / 2) * TILE_N;
+ const int offset_mins = (QK_K / 2) * TILE_N + 8 * TILE_N;
+ const int offset_d0 = (QK_K / 2) * TILE_N + 16 * TILE_N;
+ const int offset_dmin = (QK_K / 2) * TILE_N + 16 * TILE_N + TILE_N * sizeof(ggml_half);
+
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+
+ auto loadc = [&](int col) {
+ vc[col] = _mm512_setzero_ps();
+ };
+ Unroll<COLS>{}(loadc);
+
+ // Notes: vnni formats in QK_K
+ // a) quants vnni format
+ // int8 {k/4, n, 4}, viewed as 2d {k/4, 4n}, k = 32
+ // from {16, 32} to {8, 64}
+ //
+ // b) min vnni format
+ // int16 {k/2, n, 2}, viewed as 2d {k/2, 2n}, k = 8
+ // from {16, 8} to {4, 32}
+ //
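+        // i.e. quant element (k, n) of a group is stored at [k/4][n][k%4] and min
+        // element (k, n) at [k/2][n][k%2], so each dpbusd/dpwssds lane covers 4
+        // (resp. 2) consecutive k values of the same output column n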
+ auto compute = [&](int col, int i) {
+ // load a
+ if (col == 0) {
+ for (int k_group = 0; k_group < QK_K / 32; ++k_group) {
+ va[k_group] = _mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)(A[0 * KB + i].qs + k_group * 32)));
+ }
+ const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums);
+ const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
+ va_bsum = _mm512_castsi128_si512(q8s);
+ vd1 = _mm512_set1_ps(A[0 * KB + i].d);
+ }
+
+            // step 1: accumulate the quants
+ __m512i acc = _mm512_setzero_si512();
+ const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
+ const char * b_qs = b_ptr;
+ for (int k_group = 0; k_group < QK_K / 32; ++k_group) {
+ __m512i vsum = _mm512_setzero_si512();
+ for (int k = 0; k < 8; k += 2) {
+ __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 0), va[k_group]);
+ __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 1), va[k_group]);
+
+ __m512i bytes = _mm512_loadu_si512((const __m512i *)b_qs);
+ __m512i vb0 = _mm512_and_si512(bytes, lowMask);
+ vsum = _mm512_dpbusd_epi32(vsum, vb0, va0);
+ __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
+ vsum = _mm512_dpbusd_epi32(vsum, vb1, va1);
+
+ b_qs += 64;
+ }
+ // vacc += scale * (q8 @ q4)
+ const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N)));
+ acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale));
+ }
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0)));
+ vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]);
+
+ // step 2: accumulate the mins
+ __m512i acc_m = _mm512_setzero_si512();
+ for (int k = 0; k < 4; ++k) {
+ __m512i vmask = _mm512_set1_epi32(k);
+ __m512i va = _mm512_permutexvar_epi32(vmask, va_bsum);
+ __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_mins + k * 32)));
+ acc_m = _mm512_dpwssds_epi32(acc_m, va, vb);
+ }
+ const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_dmin)));
+ vc[col] = _mm512_fnmadd_ps(_mm512_cvtepi32_ps(acc_m), _mm512_mul_ps(vdmin, vd1), vc[col]);
+ };
+
+ for (int i = 0; i < KB; ++i) {
+ Unroll<COLS>{}(compute, i);
+ }
+
+ //store to C
+ auto storec = [&](int col) {
+ _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
+ };
+ Unroll<COLS>{}(storec);
+ }
+};
+
+template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
+struct tinygemm_kernel_vnni<block_q8_K, block_q5_K, float, BLOCK_M, BLOCK_N, BLOCK_K> {
+ static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
+
+ constexpr int COLS = BLOCK_N / 16;
+ const int TILE_SIZE = TILE_N * sizeof(block_q5_K) + TILE_N * 4;
+
+ const block_q8_K * RESTRICT A = static_cast<const block_q8_K *>(_A);
+ const char * RESTRICT B = static_cast<const char *>(_B);
+
+ // a.qs: 8 groups, 32 bytes each group (m256i)
+ __m512i va[8];
+ // a.bsum: 8 groups, 2 bytes each group (m128i)
+ __m512i va_bsum;
+ __m512 vc[COLS];
+ __m512 vd1;
+
+ // packed_B:
+ const int offset_qh = (QK_K / 2) * TILE_N;
+ const int offset_scales = (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N;
+ const int offset_mins = (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N + 8 * TILE_N;
+ const int offset_d0 = (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N + 16 * TILE_N;
+ const int offset_dmin = (QK_K / 2) * TILE_N + (QK_K / 8) * TILE_N + 16 * TILE_N + TILE_N * sizeof(ggml_half);
+
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+
+ auto loadc = [&](int col) {
+ vc[col] = _mm512_setzero_ps();
+ };
+ Unroll<COLS>{}(loadc);
+
+        // Q5_K and Q4_K share the same vnni formats, refer to notes above.
+ auto compute = [&](int col, int i) {
+ // load a
+ if (col == 0) {
+ for (int k_group = 0; k_group < QK_K / 32; ++k_group) {
+ va[k_group] = _mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)(A[0 * KB + i].qs + k_group * 32)));
+ }
+ const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums);
+ const __m128i q8s = _mm_hadd_epi16(_mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1));
+ va_bsum = _mm512_castsi128_si512(q8s);
+ vd1 = _mm512_set1_ps(A[0 * KB + i].d);
+ }
+
+            // step 1: accumulate the quants
+ __m512i acc = _mm512_setzero_si512();
+ const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
+ const char * b_qs = b_ptr;
+ const char * b_qh = b_ptr + offset_qh;
+ for (int k_group = 0; k_group < QK_K / 32; ++k_group) {
+ __m512i vsum = _mm512_setzero_si512();
+ __m512i hmask0 = _mm512_set1_epi8(0x1);
+ __m512i hmask1 = _mm512_set1_epi8(0x2);
+ __m512i hbits = _mm512_loadu_si512((const __m512i *)(b_qh + k_group * 64));
+ for (int k = 0; k < 8; k += 2) {
+ __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 0), va[k_group]);
+ __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(k + 1), va[k_group]);
+
+ __m512i bytes = _mm512_loadu_si512((const __m512i *)b_qs);
+ __m512i vb0 = _mm512_and_si512(bytes, lowMask);
+ __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
+
+ __m512i vh0 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask0), k), 4);
+ __m512i vh1 = _mm512_slli_epi16(_mm512_srli_epi16(_mm512_and_si512(hbits, hmask1), k + 1), 4);
+
+ hmask0 = _mm512_slli_epi16(hmask0, 2);
+ hmask1 = _mm512_slli_epi16(hmask1, 2);
+ vb0 = _mm512_add_epi8(vb0, vh0);
+ vb1 = _mm512_add_epi8(vb1, vh1);
+
+ vsum = _mm512_dpbusd_epi32(vsum, vb0, va0);
+ vsum = _mm512_dpbusd_epi32(vsum, vb1, va1);
+
+ b_qs += 64;
+ }
+ // vacc += scale * (q8 @ q5)
+ const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N)));
+ acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale));
+ }
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0)));
+ vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]);
+
+ // step 2: accumulate the mins
+ __m512i acc_m = _mm512_setzero_si512();
+ for (int k = 0; k < 4; ++k) {
+ __m512i vmask = _mm512_set1_epi32(k);
+ __m512i va = _mm512_permutexvar_epi32(vmask, va_bsum);
+ __m512i vb = _mm512_cvtepi8_epi16(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_mins + k * 32)));
+ acc_m = _mm512_dpwssds_epi32(acc_m, va, vb);
+ }
+ const __m512 vdmin = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_dmin)));
+ vc[col] = _mm512_fnmadd_ps(_mm512_cvtepi32_ps(acc_m), _mm512_mul_ps(vdmin, vd1), vc[col]);
+ };
+
+ for (int i = 0; i < KB; ++i) {
+ Unroll<COLS>{}(compute, i);
+ }
+
+ //store to C
+ auto storec = [&](int col) {
+ _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
+ };
+ Unroll<COLS>{}(storec);
+ }
+};
+
+template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
+struct tinygemm_kernel_vnni<block_q8_K, block_q6_K, float, BLOCK_M, BLOCK_N, BLOCK_K> {
+ static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
+
+ constexpr int COLS = BLOCK_N / 16;
+ const int TILE_SIZE = TILE_N * sizeof(block_q6_K);
+
+ const block_q8_K * RESTRICT A = static_cast<const block_q8_K *>(_A);
+ const char * RESTRICT B = static_cast<const char *>(_B);
+
+ // load the 256 bytes from A to 4 avx512 vectors
+ __m512i va[4];
+ __m512 vc[COLS];
+ __m512 vd1;
+
+ // packed_B:
+ const int offset_qh = (QK_K / 2) * TILE_N;
+ const int offset_scales = (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N;
+ const int offset_d0 = (QK_K / 2) * TILE_N + (QK_K / 4) * TILE_N + 16 * TILE_N;
+
+ // compensation
+ __m512i vcomp;
+
+ const __m512i m32s = _mm512_set1_epi32(32);
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+
+ auto loadc = [&](int col) {
+ vc[col] = _mm512_setzero_ps();
+ };
+ Unroll<COLS>{}(loadc);
+
+ auto compute = [&](int col, int i) {
+ if (col == 0) {
+ // load a
+ va[0] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 0));
+ va[1] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 64));
+ va[2] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 128));
+ va[3] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 192));
+
+ const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums);
+ vcomp = _mm512_mullo_epi32(_mm512_cvtepi16_epi32(q8sums), m32s);
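+                // vcomp[g] = 32 * bsums[g]; cancels the +32 bias of the packed q6 values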
+ vd1 = _mm512_set1_ps(A[0 * KB + i].d);
+ }
+
+            // accumulate the quants
+ __m512i acc = _mm512_setzero_si512();
+ const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
+ const char * b_qs = b_ptr;
+ const char * b_qh = b_ptr + offset_qh;
+ int mask = 0;
+ for (int k_group = 0; k_group < QK_K / 16; ++k_group) {
+ int r = k_group >> 2;
+ __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
+ __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
+
+ __m512i vsum = _mm512_setzero_si512();
+ __m512i hmask = _mm512_set1_epi8(0x3);
+
+ __m512i bytes = _mm512_loadu_si512(b_qs);
+ __m512i hbits = _mm512_loadu_si512(b_qh);
+ __m512i vb0 = _mm512_and_si512(bytes, lowMask);
+ __m512i vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
+ __m512i vh0 = _mm512_slli_epi16(_mm512_and_si512(hbits, hmask), 4);
+ __m512i vh1 = _mm512_slli_epi16(_mm512_and_si512(hbits, _mm512_slli_epi16(hmask, 2)), 2);
+
+ vb0 = _mm512_add_epi8(vb0, vh0);
+ vb1 = _mm512_add_epi8(vb1, vh1);
+ vsum = _mm512_dpbusd_epi32(vsum, vb0, va0);
+ vsum = _mm512_dpbusd_epi32(vsum, vb1, va1);
+ b_qs += 64;
+
+ va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
+ va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
+
+ bytes = _mm512_loadu_si512(b_qs);
+ vb0 = _mm512_and_si512(bytes, lowMask);
+ vb1 = _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask);
+ vh0 = _mm512_and_si512(hbits, _mm512_slli_epi16(hmask, 4));
+ vh1 = _mm512_srli_epi16(_mm512_and_si512(hbits, _mm512_slli_epi16(hmask, 6)), 2);
+ vb0 = _mm512_add_epi8(vb0, vh0);
+ vb1 = _mm512_add_epi8(vb1, vh1);
+ vsum = _mm512_dpbusd_epi32(vsum, vb0, va0);
+ vsum = _mm512_dpbusd_epi32(vsum, vb1, va1);
+ b_qs += 64;
+ b_qh += 64;
+
+ // B * A - 32 * A
+ __m512i vmask = _mm512_set1_epi32(k_group);
+ vsum = _mm512_sub_epi32(vsum, _mm512_permutexvar_epi32(vmask, vcomp));
+
+ // vacc += scale * (q8 @ q6)
+ const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N)));
+ acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale));
+ }
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0)));
+ vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]);
+ };
+
+ for (int i = 0; i < KB; ++i) {
+ Unroll<COLS>{}(compute, i);
+ }
+
+ //store to C
+ auto storec = [&](int col) {
+ _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
+ };
+ Unroll<COLS>{}(storec);
+ }
+};
+
+template <int BLOCK_M, int BLOCK_N, int BLOCK_K>
+struct tinygemm_kernel_vnni<block_q8_K, block_iq4_xs, float, BLOCK_M, BLOCK_N, BLOCK_K> {
+ static void apply(int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
+
+ constexpr int COLS = BLOCK_N / 16;
+ const int TILE_SIZE = TILE_N * sizeof(block_iq4_xs) + TILE_N * 2;
+
+ const block_q8_K * RESTRICT A = static_cast<const block_q8_K *>(_A);
+ const char * RESTRICT B = static_cast<const char *>(_B);
+
+ // load the 256 bytes from A to 4 avx512 vectors
+ __m512i va[4];
+ __m512 vc[COLS];
+ __m512 vd1;
+
+ // packed_B:
+ const int offset_scales = (QK_K / 2) * TILE_N ;
+ const int offset_d0 = (QK_K / 2) * TILE_N + 8 * TILE_N;
+
+ // compensation
+ __m512i vcomp;
+
+ const __m256i m128s = _mm256_set1_epi16(128);
+ const __m512i lowMask = _mm512_set1_epi8(0xF);
+
+ const __m512i values128 = _mm512_set_epi8(
+ 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
+ 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
+ 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127,
+ 113, 89, 69, 53, 38, 25, 13, 1, -10, -22, -35, -49, -65, -83, -104, -127
+ );
+ const __m512i off = _mm512_set1_epi8(static_cast<char>(0x80));
+ const __m512i values256 = _mm512_add_epi8(values128, off);
+
+ auto loadc = [&](int col) {
+ vc[col] = _mm512_setzero_ps();
+ };
+ Unroll<COLS>{}(loadc);
+
+ auto compute = [&](int col, int i) {
+ if (col == 0) {
+ // load a
+ va[0] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 0));
+ va[1] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 64));
+ va[2] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 128));
+ va[3] = _mm512_loadu_si512((const __m512i *)(A[0 * KB + i].qs + 192));
+
+ // compensation: 128 * A
+ const __m256i q8sums = _mm256_loadu_si256((const __m256i *)A[0 * KB + i].bsums);
+ vcomp = _mm512_castsi256_si512(_mm256_madd_epi16(q8sums, m128s));
+ vd1 = _mm512_set1_ps(A[0 * KB + i].d);
+ }
+
+            // accumulate the quants
+ __m512i acc = _mm512_setzero_si512();
+ const char * b_ptr = B + PACKED_INDEX(col, i, KB, TILE_SIZE);
+ const char * b_qs = b_ptr;
+ int mask = 0;
+ for (int k_group = 0; k_group < QK_K / 32; ++k_group) {
+ int r = k_group >> 1;
+ __m512i vmask = _mm512_set1_epi32(k_group);
+ __m512i vsum = _mm512_setzero_si512();
+ for (int k = 0; k < 8; k += 2) {
+ __m512i va0 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
+ __m512i va1 = _mm512_permutexvar_epi32(_mm512_set1_epi32(mask++), va[r]);
+
+ __m512i bytes = _mm512_loadu_si512(b_qs);
+ __m512i vb0 = _mm512_shuffle_epi8(values256, _mm512_and_si512(bytes, lowMask));
+ __m512i vb1 = _mm512_shuffle_epi8(values256, _mm512_and_si512(_mm512_srli_epi16(bytes, 4), lowMask));
+
+ vsum = _mm512_dpbusd_epi32(vsum, vb0, va0);
+ vsum = _mm512_dpbusd_epi32(vsum, vb1, va1);
+ b_qs += 64;
+ }
+ // (B + 128) * A - 128 * A
+ vsum = _mm512_sub_epi32(vsum, _mm512_permutexvar_epi32(vmask, vcomp));
+
+ // vacc += scale * (q8 @ q4)
+ const __m512i vscale = _mm512_cvtepi8_epi32(_mm_loadu_si128((const __m128i *)(b_ptr + offset_scales + k_group * TILE_N)));
+ acc = _mm512_add_epi32(acc, _mm512_mullo_epi32(vsum, vscale));
+ }
+ const __m512 vd0 = _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)(b_ptr + offset_d0)));
+ vc[col] = _mm512_fmadd_ps(_mm512_cvtepi32_ps(acc), _mm512_mul_ps(vd0, vd1), vc[col]);
+ };
+
+ for (int i = 0; i < KB; ++i) {
+ Unroll<COLS>{}(compute, i);
+ }
+
+ //store to C
+ auto storec = [&](int col) {
+ _mm512_storeu_ps((__m512i*)(C + 0 * ldc + col * 16), vc[col]);
+ };
+ Unroll<COLS>{}(storec);
+ }
+};
+
+#define LAUNCH_TINYGEMM_KERNEL_VNNI(NB_SIZE) \
+ tinygemm_kernel_vnni<vec_dot_type, type, float, 1, NB_SIZE, blck_size>::apply( \
+ KB, (const char *)wdata + 0 * row_size_A, \
+ (const char *)src0->data + PACKED_INDEX(nb * kTilesN, 0, KB, TILE_SIZE), \
+ (float *) dst->data + 0 * N + nb_start, ldc)
+
+template <typename TA, typename TB, typename TC, int BLOCK_K,
+ typename std::enable_if<!is_type_qkk<TB>::value, int>::type = 0>
+void tinygemm_kernel_amx(int M, int N, int KB, const void * RESTRICT _A, const void * RESTRICT _B, TC * RESTRICT C, int ldc) {
+ using packed_B_t = packed_B_type<TB>;
+ const int TILE_SIZE = get_tile_size<TB>();
+ const bool need_unpack = do_unpack<TB>::value;
+
+ GGML_ASSERT(M <= 2 * TILE_M && N == 2 * TILE_N);
+ const TA * RESTRICT A = static_cast<const TA *>(_A);
+ const char * RESTRICT B = static_cast<const char *>(_B);
+
+ const int m0 = std::min(M, TILE_M);
+ const int m1 = std::max(M - TILE_M, 0);
+ const int lda = KB * sizeof(TA);
+ //const int ldb = KB * sizeof(TB);
+
+ static thread_local packed_B_t Tile0[TILE_N * TILE_K];
+ static thread_local packed_B_t Tile1[TILE_N * TILE_K];
+ static thread_local int8_t Tile23[TILE_M * TILE_K];
+
+ static thread_local int32_t TileC0[TILE_M * TILE_N * 4];
+ static thread_local int32_t TileC1[TILE_M * TILE_N * 4];
+
+ // double buffering C to interleave avx512 and amx
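+    // while AMX accumulates the current K block into C_cur, avx512 (acc_C) folds
+    // the previous block's results from C_pre into C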
+ int32_t * C_cur = TileC0;
+ int32_t * C_pre = TileC1;
+
+ auto Tile4 = [&](int32_t * base) { return base; };
+ auto Tile5 = [&](int32_t * base) { return base + TILE_M * TILE_N; };
+ auto Tile6 = [&](int32_t * base) { return base + 2 * TILE_M * TILE_N; };
+ auto Tile7 = [&](int32_t * base) { return base + 3 * TILE_M * TILE_N; };
+
+ if (M == 2 * TILE_M) {
+ // i = 0
+ const char * B_blk0 = B + PACKED_INDEX(0, 0, KB, TILE_SIZE);
+ const char * B_blk1 = B + PACKED_INDEX(1, 0, KB, TILE_SIZE);
+ if (need_unpack) {
+ unpack_B<TB>(Tile0, B_blk0);
+ _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK);
+ } else {
+ _tile_loadd(TMM0, B_blk0, TILE_N * VNNI_BLK);
+ }
+
+ _tile_zero(TMM4);
+ _tile_loadd(TMM2, A[0].qs, lda);
+ _tile_dpbssd(TMM4, TMM2, TMM0);
+ _tile_stored(TMM4, Tile4(C_pre), TILE_N * sizeof(int32_t));
+
+ _tile_zero(TMM5);
+ _tile_loadd(TMM3, A[TILE_M * KB + 0].qs, lda);
+ _tile_dpbssd(TMM5, TMM3, TMM0);
+ _tile_stored(TMM5, Tile5(C_pre), TILE_N * sizeof(int32_t));
+
+ if (need_unpack) {
+            unpack_B<TB>(Tile1, B_blk1);
+ _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK);
+ } else {
+ _tile_loadd(TMM1, B_blk1, TILE_N * VNNI_BLK);
+ }
+
+ _tile_zero(TMM6);
+ _tile_dpbssd(TMM6, TMM2, TMM1);
+ _tile_stored(TMM6, Tile6(C_pre), TILE_N * sizeof(int32_t));
+
+ _tile_zero(TMM7);
+ _tile_dpbssd(TMM7, TMM3, TMM1);
+ _tile_stored(TMM7, Tile7(C_pre), TILE_N * sizeof(int32_t));
+
+ for (int i = 1; i < KB; ++i) {
+ // index of previous iter
+ const int ii = i - 1;
+ const char * B_blk0 = B + PACKED_INDEX(0, i, KB, TILE_SIZE);
+ const char * B_blk1 = B + PACKED_INDEX(1, i, KB, TILE_SIZE);
+ GGML_DISPATCH_BOOL(ii > 0, is_acc, [&] {
+ if (need_unpack) {
+ unpack_B<TB>(Tile0, B_blk0);
+ _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK);
+ } else {
+ _tile_loadd(TMM0, B_blk0, TILE_N * VNNI_BLK);
+ }
+ _tile_zero(TMM4);
+ _tile_loadd(TMM2, A[i].qs, lda);
+ acc_C<TA, TB, is_acc>::apply(C, ldc, Tile4(C_pre), &A[ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M);
+
+ _tile_dpbssd(TMM4, TMM2, TMM0);
+ _tile_stored(TMM4, Tile4(C_cur), TILE_N * sizeof(int32_t));
+
+ _tile_zero(TMM5);
+ _tile_loadd(TMM3, A[TILE_M * KB + i].qs, lda);
+ acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc, ldc, Tile5(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M);
+
+ _tile_dpbssd(TMM5, TMM3, TMM0);
+ _tile_stored(TMM5, Tile5(C_cur), TILE_N * sizeof(int32_t));
+
+ if (need_unpack) {
+ unpack_B<TB>(Tile1, B_blk1);
+ _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK);
+ } else {
+ _tile_loadd(TMM1, B_blk1, TILE_N * VNNI_BLK);
+ }
+ _tile_zero(TMM6);
+ acc_C<TA, TB, is_acc>::apply(C + TILE_N, ldc, Tile6(C_pre), &A[ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M);
+
+ _tile_dpbssd(TMM6, TMM2, TMM1);
+ _tile_stored(TMM6, Tile6(C_cur), TILE_N * sizeof(int32_t));
+
+ _tile_zero(TMM7);
+ acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc + TILE_N, ldc, Tile7(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M);
+
+ _tile_dpbssd(TMM7, TMM3, TMM1);
+ _tile_stored(TMM7, Tile7(C_cur), TILE_N * sizeof(int32_t));
+
+ std::swap(C_cur, C_pre);
+ });
+ }
+ // final accumulation
+ {
+ int ii = KB - 1;
+ acc_C<TA, TB, true>::apply(C, ldc, Tile4(C_pre), &A[ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M);
+ acc_C<TA, TB, true>::apply(C + TILE_M * ldc, ldc, Tile5(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(0, ii, KB, TILE_SIZE), TILE_M);
+ acc_C<TA, TB, true>::apply(C + TILE_N, ldc, Tile6(C_pre), &A[ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M);
+ acc_C<TA, TB, true>::apply(C + TILE_M * ldc + TILE_N, ldc, Tile7(C_pre), &A[TILE_M * KB + ii], KB, B + PACKED_INDEX(1, ii, KB, TILE_SIZE), TILE_M);
+ }
+ } else {
+ for (int i = 0; i < KB; ++i) {
+ _tile_zero(TMM4);
+ _tile_zero(TMM6);
+ if (m1 != 0) {
+ _tile_zero(TMM5);
+ _tile_zero(TMM7);
+ }
+
+ const char * B_blk0 = B + PACKED_INDEX(0, i, KB, TILE_SIZE);
+ const char * B_blk1 = B + PACKED_INDEX(1, i, KB, TILE_SIZE);
+ if (need_unpack) {
+ unpack_B<TB>(Tile0, B_blk0);
+ _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK);
+ } else {
+ _tile_loadd(TMM0, B_blk0, TILE_N * VNNI_BLK);
+ }
+
+ if (need_unpack) {
+ unpack_B<TB>(Tile1, B_blk1);
+ _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK);
+ } else {
+ _tile_loadd(TMM1, B_blk1, TILE_N * VNNI_BLK);
+ }
+
+ if (m0 == TILE_M) {
+ _tile_loadd(TMM2, A[i].qs, lda);
+ } else {
+ unpack_A(Tile23, &A[i], KB, m0);
+ _tile_loadd(TMM2, Tile23, TILE_K);
+ }
+
+ _tile_dpbssd(TMM4, TMM2, TMM0);
+ _tile_dpbssd(TMM6, TMM2, TMM1);
+
+ _tile_stored(TMM4, Tile4(C_cur), TILE_N * sizeof(int32_t));
+ _tile_stored(TMM6, Tile6(C_cur), TILE_N * sizeof(int32_t));
+
+ GGML_DISPATCH_BOOL(i > 0, is_acc, [&] {
+ acc_C<TA, TB, is_acc>::apply(C, ldc, Tile4(C_cur), &A[i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m0);
+ acc_C<TA, TB, is_acc>::apply(C + TILE_N, ldc, Tile6(C_cur), &A[i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m0);
+ });
+
+ if (m1 != 0) {
+ unpack_A(Tile23, &A[TILE_M * KB + i], KB, m1);
+ _tile_loadd(TMM3, Tile23, TILE_K);
+
+ _tile_dpbssd(TMM5, TMM3, TMM0);
+ _tile_dpbssd(TMM7, TMM3, TMM1);
+ _tile_stored(TMM5, Tile5(C_cur), TILE_N * sizeof(int32_t));
+ _tile_stored(TMM7, Tile7(C_cur), TILE_N * sizeof(int32_t));
+ GGML_DISPATCH_BOOL(i > 0, is_acc, [&] {
+ acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc, ldc, Tile5(C_cur), &A[TILE_M * KB + i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m1);
+ acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc + TILE_N, ldc, Tile7(C_cur), &A[TILE_M * KB + i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m1);
+ });
+ }
+ }
+ }
+ return;
+}
+
+template <typename TA, typename TB, typename TC, int BLOCK_K,
+ typename std::enable_if<is_type_qkk<TB>::value, int>::type = 0>
+void tinygemm_kernel_amx(int M, int N, int KB, const void * RESTRICT _A, const void * RESTRICT _B, float * RESTRICT C, int ldc) {
+ static_assert(std::is_same<TA, block_q8_K>::value);
+ const int TILE_SIZE = get_tile_size<TB>();
+
+ GGML_ASSERT(M <= 2 * TILE_M && N == 2 * TILE_N);
+ const TA * RESTRICT A = static_cast<const TA *>(_A);
+ const char * RESTRICT B = static_cast<const char *>(_B);
+
+ const int m0 = std::min(M, TILE_M);
+ const int m1 = std::max(M - TILE_M, 0);
+ //const int lda = KB * sizeof(TA);
+
+ static thread_local int8_t Tile0[TILE_N * TILE_K];
+ static thread_local int8_t Tile1[TILE_N * TILE_K];
+ static thread_local int8_t Tile23[TILE_M * TILE_K];
+
+ // mat mul result for each group
+ static thread_local int32_t Tile4[TILE_M * TILE_N];
+ static thread_local int32_t Tile5[TILE_M * TILE_N];
+ static thread_local int32_t Tile6[TILE_M * TILE_N];
+ static thread_local int32_t Tile7[TILE_M * TILE_N];
+
+ // sum of each QK_K block, contains 8 groups, int32
+ static thread_local int32_t Sumi4[TILE_M * TILE_N];
+ static thread_local int32_t Sumi5[TILE_M * TILE_N];
+ static thread_local int32_t Sumi6[TILE_M * TILE_N];
+ static thread_local int32_t Sumi7[TILE_M * TILE_N];
+
+ const int k_group_size = std::is_same<TB, block_q6_K>::value ? 16 : 32;
+ for (int i = 0; i < KB; ++i) {
+        // step 1: accumulate the quants across the groups (8 groups of 32, or 16 groups of 16 for q6_K)
+ for (int k = 0; k < QK_K / k_group_size; ++k) {
+ GGML_DISPATCH_BOOL(k > 0, is_acc, [&] {
+ _tile_zero(TMM4);
+ _tile_zero(TMM6);
+
+ unpack_B<TB>(Tile0, B + PACKED_INDEX(0, i, KB, TILE_SIZE), k);
+ _tile_loadd(TMM0, Tile0, TILE_N * VNNI_BLK);
+
+ unpack_B<TB>(Tile1, B + PACKED_INDEX(1, i, KB, TILE_SIZE), k);
+ _tile_loadd(TMM1, Tile1, TILE_N * VNNI_BLK);
+
+ unpack_A<TB>(Tile23, &A[i], KB, k, m0);
+ _tile_loadd(TMM2, Tile23, TILE_K);
+
+ _tile_dpbssd(TMM4, TMM2, TMM0);
+ _tile_dpbssd(TMM6, TMM2, TMM1);
+
+ _tile_stored(TMM4, Tile4, TILE_N * sizeof(int32_t));
+ _tile_stored(TMM6, Tile6, TILE_N * sizeof(int32_t));
+
+ scale_C<TB, is_acc>(Tile4, Sumi4, B + PACKED_INDEX(0, i, KB, TILE_SIZE), k, m0);
+ scale_C<TB, is_acc>(Tile6, Sumi6, B + PACKED_INDEX(1, i, KB, TILE_SIZE), k, m0);
+
+ if (m1 != 0) {
+ _tile_zero(TMM5);
+ _tile_zero(TMM7);
+
+ unpack_A<TB>(Tile23, &A[TILE_M * KB + i], KB, k, m1);
+ _tile_loadd(TMM3, Tile23, TILE_K);
+
+ _tile_dpbssd(TMM5, TMM3, TMM0);
+ _tile_dpbssd(TMM7, TMM3, TMM1);
+
+ _tile_stored(TMM5, Tile5, TILE_N * sizeof(int32_t));
+ _tile_stored(TMM7, Tile7, TILE_N * sizeof(int32_t));
+
+ scale_C<TB, is_acc>(Tile5, Sumi5, B + PACKED_INDEX(0, i, KB, TILE_SIZE), k, m1);
+ scale_C<TB, is_acc>(Tile7, Sumi7, B + PACKED_INDEX(1, i, KB, TILE_SIZE), k, m1);
+ }
+ });
+ }
+
+        // step 2: accumulate the mins
+ GGML_DISPATCH_BOOL(i > 0, is_acc, [&] {
+ acc_C<TA, TB, is_acc>::apply(C, ldc, Sumi4, &A[i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m0);
+ acc_C<TA, TB, is_acc>::apply(C + TILE_N, ldc, Sumi6, &A[i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m0);
+ if (m1 != 0) {
+ acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc, ldc, Sumi5, &A[TILE_M * KB + i], KB, B + PACKED_INDEX(0, i, KB, TILE_SIZE), m1);
+ acc_C<TA, TB, is_acc>::apply(C + TILE_M * ldc + TILE_N, ldc, Sumi7, &A[TILE_M * KB + i], KB, B + PACKED_INDEX(1, i, KB, TILE_SIZE), m1);
+ }
+ });
+ }
+ return;
+}
+
+} // anonymous namespace
+
+// get the packed tensor size for quantized weights
+size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor) {
+ const enum ggml_type TYPE = tensor->type;
+
+ const int K = tensor->ne[0]; // ne0: in_features
+ const int N = tensor->ne[1]; // ne1: out_features
+
+ auto get_tensor_size = [&] {
+ size_t row_size_B{0};
+ GGML_DISPATCH_QTYPES(TYPE, [&] {
+ row_size_B = get_row_size<type, blck_size>(K);
+ });
+ return N * row_size_B;
+ };
+
+ if (qtype_has_amx_kernels(TYPE)) {
+ return get_tensor_size();
+ } else {
+ // for f16, bf16 we don't do packing
+ return ggml_nbytes(tensor);
+ }
+}
+
+// pack weight to vnni format
+void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ GGML_ASSERT(offset == 0 && size == ggml_nbytes(tensor)); // only full tensor conversion is supported for now
+
+ const enum ggml_type TYPE = tensor->type;
+
+ const int K = tensor->ne[0]; // ne0: in_features
+ const int N = tensor->ne[1]; // ne1: out_features
+
+#if defined(_OPENMP)
+ // the buffer ctx is not initialized when .set_tensor is called
+ int n_threads = omp_get_num_threads();
+#else
+ int n_threads = 1;
+#endif
+
+ GGML_DISPATCH_QTYPES(TYPE, [&] {
+ convert_B_packed_format<type, blck_size>((void *)((char *)tensor->data + offset), (const type *)data, N, K, n_threads);
+ });
+}
+
+size_t ggml_backend_amx_desired_wsize(const struct ggml_tensor * dst) {
+ struct ggml_tensor * src0 = dst->src[0];
+
+ const enum ggml_type TYPE = src0->type;
+
+ const bool is_floating_type = TYPE == GGML_TYPE_F16;
+ if (is_floating_type) {
+ return 0;
+ }
+
+ const int M = dst->ne[1];
+ const int K = src0->ne[0];
+
+ size_t desired_wsize = 0;
+
+ GGML_DISPATCH_QTYPES(TYPE, [&] {
+ const size_t row_size_A = K / blck_size * sizeof(vec_dot_type);
+ desired_wsize = M * row_size_A;
+ });
+
+ return desired_wsize;
+}
+
+// NB: mixed dtype gemm with Advanced Matrix Extensions (Intel AMX)
+//
+// src0: weight in shape of {N, K}, quantized
+// src1: input in shape of {M, K}, float32
+// dst: output in shape of {M, N}, float32
+//
+// the function performs: dst = src1 @ src0.T
+//
+void ggml_backend_amx_mul_mat(const ggml_compute_params * params, struct ggml_tensor * dst) {
+ struct ggml_tensor * src0 = dst->src[0];
+ struct ggml_tensor * src1 = dst->src[1];
+
+ const enum ggml_type TYPE = src0->type;
+
+ // f16 only has avx512 kernels for now,
+ // amx kernels will be added once 6th gen xeon is released.
+ const bool is_floating_type = TYPE == GGML_TYPE_F16;
+
+ const int M = dst->ne[1];
+ const int N = dst->ne[0];
+ const int K = src0->ne[0];
+ const int ldc = dst->nb[1] / dst->nb[0];
+
+ if (is_floating_type) {
+ constexpr int BLOCK_M = 4;
+ constexpr int BLOCK_N = 6;
+ const int MB = div_up(M, BLOCK_M);
+ const int NB = div_up(N, BLOCK_N);
+
+ parallel_for_ggml(params, MB * NB, [&](int begin, int end) {
+ GGML_DISPATCH_FLOATING_TYPES(TYPE, [&] {
+ for (int i = begin; i < end; ++i) {
+ int mb = i / NB;
+ int nb = i % NB;
+
+ int mb_start = mb * BLOCK_M;
+ int mb_size = std::min(BLOCK_M, M - mb_start);
+ int nb_start = nb * BLOCK_N;
+ int nb_size = std::min(BLOCK_N, N - nb_start);
+
+ switch (mb_size << 4 | nb_size) {
+ case 0x12: LAUNCH_TINYGEMM_KERNEL_AVX(1, 2); break;
+ case 0x14: LAUNCH_TINYGEMM_KERNEL_AVX(1, 4); break;
+ case 0x16: LAUNCH_TINYGEMM_KERNEL_AVX(1, 6); break;
+ case 0x22: LAUNCH_TINYGEMM_KERNEL_AVX(2, 2); break;
+ case 0x24: LAUNCH_TINYGEMM_KERNEL_AVX(2, 4); break;
+ case 0x26: LAUNCH_TINYGEMM_KERNEL_AVX(2, 6); break;
+ case 0x32: LAUNCH_TINYGEMM_KERNEL_AVX(3, 2); break;
+ case 0x34: LAUNCH_TINYGEMM_KERNEL_AVX(3, 4); break;
+ case 0x36: LAUNCH_TINYGEMM_KERNEL_AVX(3, 6); break;
+ case 0x42: LAUNCH_TINYGEMM_KERNEL_AVX(4, 2); break;
+ case 0x44: LAUNCH_TINYGEMM_KERNEL_AVX(4, 4); break;
+ case 0x46: LAUNCH_TINYGEMM_KERNEL_AVX(4, 6); break;
+ default: fprintf(stderr, "Unexpected block size!\n");
+ }
+ }
+ });
+ });
+ return;
+ }
+
+    // pointer to work space, used to convert A from float to quantized type
+ void * wdata = params->wdata;
+
+ //TODO: performance improvement: merge quant A
+ if (params->ith == 0) {
+ GGML_DISPATCH_QTYPES(TYPE, [&] {
+ const size_t row_size_A = K / blck_size * sizeof(vec_dot_type);
+ const size_t desired_wsize = M * row_size_A;
+ if (params->wsize < desired_wsize) {
+ GGML_ABORT("insufficient work space size");
+ }
+
+            // Q4_0, Q4_1, Q8_0 handle 1 TILE_K per blck_size
+            // Q4_K, Q5_K, Q6_K, IQ4_XS handle 8 TILE_K per blck_size
+ GGML_ASSERT(TILE_K == blck_size || TILE_K * 8 == blck_size);
+
+ const float * A_data = static_cast<const float *>(src1->data);
+ for (int m = 0; m < M; ++m) {
+ from_float<vec_dot_type>(A_data + m * K, (char *)wdata + m * row_size_A, K);
+ }
+ });
+ }
+
+ ggml_barrier(params->threadpool);
+
+ if (M == 1) {
+ // MB = 1 and handle 8 tiles in each block
+ constexpr int kTilesN = 4;
+ constexpr int BLOCK_N = TILE_N * kTilesN;
+ const int NB = div_up(N, BLOCK_N);
+
+ parallel_for_ggml(params, NB, [&](int begin, int end) {
+ GGML_DISPATCH_QTYPES(TYPE, [&] {
+ const int KB = K / blck_size;
+ const int TILE_SIZE = get_tile_size<type>();
+ const int row_size_A = KB * sizeof(vec_dot_type);
+ for (int i = begin; i < end; ++i) {
+ int nb = i;
+ int nb_start = nb * BLOCK_N;
+ int nb_size = std::min(BLOCK_N, N - nb_start); // 32, 64, 96
+
+ switch (nb_size) {
+ //case 160: LAUNCH_TINYGEMM_KERNEL_VNNI(160); break;
+ case 128: LAUNCH_TINYGEMM_KERNEL_VNNI(128); break;
+ case 96: LAUNCH_TINYGEMM_KERNEL_VNNI(96); break;
+ case 64: LAUNCH_TINYGEMM_KERNEL_VNNI(64); break;
+ case 32: LAUNCH_TINYGEMM_KERNEL_VNNI(32); break;
+ default: fprintf(stderr, "Unexpected n block size!\n");
+ }
+ }
+ });
+ });
+ return;
+ }
+
+    // handle 4 tiles at a time
+ constexpr int BLOCK_M = TILE_M * 2;
+ constexpr int BLOCK_N = TILE_N * 2;
+ const int MB = div_up(M, BLOCK_M);
+ const int NB = div_up(N, BLOCK_N);
+
+ parallel_for_ggml(params, MB * NB, [&](int begin, int end) {
+ // init tile config for each thread
+ ggml_tile_config_init();
+
+ GGML_DISPATCH_QTYPES(TYPE, [&] {
+ const int KB = K / blck_size;
+ const int TILE_SIZE = get_tile_size<type>();
+ const int row_size_A = KB * sizeof(vec_dot_type);
+
+ for (int i = begin; i < end; ++i) {
+ int mb = i / NB;
+ int nb = i % NB;
+
+ int mb_start = mb * BLOCK_M;
+ int mb_size = std::min(BLOCK_M, M - mb_start);
+ int nb_start = nb * BLOCK_N;
+ int nb_size = BLOCK_N;
+
+ tinygemm_kernel_amx<vec_dot_type, type, float, blck_size>(
+ mb_size, nb_size, KB,
+ (const char *)wdata + mb_start * row_size_A,
+ (const char *)src0->data + PACKED_INDEX(nb * 2, 0, KB, TILE_SIZE),
+ (float *) dst->data + mb_start * N + nb_start, ldc);
+ }
+ });
+ });
+}
+
+#endif // if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
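
The NB comment above ggml_backend_amx_mul_mat defines the computation as dst = src1 @ src0.T, with src0 packed as {N, K} and src1 as {M, K}. For orientation only, here is a minimal reference sketch of that semantics in plain C++, assuming the weights have already been dequantized to float and all buffers are contiguous row-major; it is not the AMX/AVX512 kernel itself and is not part of the patch.

// Illustrative reference for dst = src1 @ src0^T (assumptions: src0 dequantized
// to float, row-major storage, ldc == N). Not part of the patch.
static void mul_mat_reference(const float * src0,   // weight, shape {N, K}
                              const float * src1,   // input,  shape {M, K}
                              float       * dst,    // output, shape {M, N}
                              int M, int N, int K) {
    for (int m = 0; m < M; ++m) {
        for (int n = 0; n < N; ++n) {
            float sum = 0.0f;
            for (int k = 0; k < K; ++k) {
                sum += src1[m * K + k] * src0[n * K + k];
            }
            dst[m * N + n] = sum;
        }
    }
}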
--- /dev/null
+#pragma once
+#include "common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor);
+
+void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+
+void ggml_backend_amx_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst);
+
+#ifdef __cplusplus
+}
+#endif
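
The new amx.h header above exposes the three entry points the CPU backend calls into. Below is a rough, hypothetical illustration of the expected call order (size the packed buffer, repack the weights, then run the matmul); the wrapper function name and the assumption that the weight tensor is dst->src[0] are mine, not part of the patch.

// Hypothetical usage sketch (not the backend's actual code): weight is assumed
// to be dst->src[0] and to use a quantized type that has AMX kernels.
static void amx_weight_and_matmul(struct ggml_tensor * weight, const void * weight_data,
                                  const struct ggml_compute_params * params,
                                  struct ggml_tensor * dst) {
    // 1) the packed VNNI layout may need more bytes than ggml_nbytes() for quantized types
    const size_t packed_size = ggml_backend_amx_get_alloc_size(weight);
    (void) packed_size; // buffer allocation itself is handled by the AMX buffer type

    // 2) repack the full tensor into VNNI format (only full-tensor writes are supported)
    ggml_backend_amx_convert_weight(weight, weight_data, 0, ggml_nbytes(weight));

    // 3) dst = src1 @ weight^T using the AMX/AVX512 kernels
    ggml_backend_amx_mul_mat(params, dst);
}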
extern "C" {
#endif
+struct ggml_compute_params {
+ // ith = thread index, nth = number of threads
+ int ith, nth;
+
+ // work buffer for all threads
+ size_t wsize;
+ void * wdata;
+
+ struct ggml_threadpool * threadpool;
+};
+
+
#if defined(_MSC_VER)
#define m512bh(p) p
}
#endif
+// TODO: move to ggml-threading
+void ggml_barrier(struct ggml_threadpool * tp);
+
#ifdef __cplusplus
}
#endif
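
With ggml_compute_params and ggml_barrier now visible to backend source files such as the AMX kernels, per-op code can split work by thread index and synchronize before consuming shared results. A minimal sketch of that pattern follows; the function and the per-row work are placeholders, not code from this patch.

// Hypothetical example: each thread handles an interleaved subset of rows, then
// waits at the barrier so every row is complete before any thread reads the result.
static void scale_rows_parallel(const struct ggml_compute_params * params,
                                float * data, int n_rows, int n_cols) {
    for (int r = params->ith; r < n_rows; r += params->nth) {
        for (int c = 0; c < n_cols; ++c) {
            data[r * n_cols + c] *= 2.0f; // placeholder per-row work
        }
    }
    ggml_barrier(params->threadpool);
}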
#include "ggml-quants.h"
#include "ggml-cpu-quants.h"
#include "ggml-threading.h"
+#include "amx/amx.h"
#include "ggml.h"
#if defined(_MSC_VER) || defined(__MINGW32__)
for (int i = 0; i < offset; ++i) { \
x[i] = _mm512_add_ps(x[i], x[offset+i]); \
} \
- res = _mm512_reduce_add_ps(x[0]); \
+ res = (ggml_float) _mm512_reduce_add_ps(x[0]); \
} while (0)
// TODO: is this optimal ?
for (int i = 0; i < offset; ++i) { \
x[i] = _mm512_add_ps(x[i], x[offset+i]); \
} \
- res = _mm512_reduce_add_ps(x[0]); \
+ res = (ggml_float) _mm512_reduce_add_ps(x[0]); \
} while (0)
#define GGML_F16_VEC GGML_F32Cx16
#define GGML_F16_VEC_FMA GGML_F32Cx16_FMA
#define GGML_F16_VEC_ADD GGML_F32Cx16_ADD
#define GGML_F16_VEC_MUL GGML_F32Cx16_MUL
-#define GGML_F16_VEC_REDUCE GGML_F32Cx16_REDUCE
+#define GGML_F16_VEC_REDUCE GGML_F32Cx16_REDUCE
#elif defined(__AVX__)
#define GGML_SIMD
#define GGML_F32x4_FMA(a, b, c) __lsx_vfmadd_s(b, c, a)
#define GGML_F32x4_ADD __lsx_vfadd_s
#define GGML_F32x4_MUL __lsx_vfmul_s
-#define GGML_F32x4_REDUCE(res, x) \
-{ \
- int offset = GGML_F32_ARR >> 1; \
- for (int i = 0; i < offset; ++i) { \
- x[i] = __lsx_vfadd_s(x[i], x[offset+i]); \
- } \
- offset >>= 1; \
- for (int i = 0; i < offset; ++i) { \
- x[i] = __lsx_vfadd_s(x[i], x[offset+i]); \
- } \
- offset >>= 1; \
- for (int i = 0; i < offset; ++i) { \
- x[i] = __lsx_vfadd_s(x[i], x[offset+i]); \
- } \
- __m128i tmp = __lsx_vsrli_d((__m128i)x[0], 32); \
- tmp = (__m128i)__lsx_vfadd_s((__m128)tmp, x[0]); \
- tmp = __lsx_vpickev_w(__lsx_vldi(0), tmp); \
- const __m128 t0 = __lsx_vshuf4i_w(tmp, 0x88); \
- tmp = __lsx_vsrli_d((__m128i)t0, 32); \
- tmp = (__m128i)__lsx_vfadd_s((__m128)tmp, t0); \
- tmp = __lsx_vpickev_w(__lsx_vldi(0), tmp); \
- res = (ggml_float) __lsx_vpickve2gr_w(__lsx_vshuf4i_w(tmp, 0x88), 0); \
+#define GGML_F32x4_REDUCE(res, x) \
+{ \
+ int offset = GGML_F32_ARR >> 1; \
+ for (int i = 0; i < offset; ++i) { \
+ x[i] = __lsx_vfadd_s(x[i], x[offset + i]); \
+ } \
+ offset >>= 1; \
+ for (int i = 0; i < offset; ++i) { \
+ x[i] = __lsx_vfadd_s(x[i], x[offset + i]); \
+ } \
+ offset >>= 1; \
+ for (int i = 0; i < offset; ++i) { \
+ x[i] = __lsx_vfadd_s(x[i], x[offset + i]); \
+ } \
+ __m128i tmp = __lsx_vsrli_d((__m128i) x[0], 32); \
+ tmp = (__m128i) __lsx_vfadd_s((__m128) tmp, x[0]); \
+ tmp = __lsx_vpickev_w(__lsx_vldi(0), tmp); \
+ const __m128 t0 = __lsx_vshuf4i_w(tmp, 0x88); \
+ tmp = __lsx_vsrli_d((__m128i) t0, 32); \
+ tmp = (__m128i) __lsx_vfadd_s((__m128) tmp, t0); \
+ tmp = __lsx_vpickev_w(__lsx_vldi(0), tmp); \
+ res = (ggml_float) __lsx_vpickve2gr_w(__lsx_vshuf4i_w(tmp, 0x88), 0); \
}
#define GGML_F32_VEC GGML_F32x4
int ith;
};
-struct ggml_compute_params {
- // ith = thread index, nth = number of threads
- int ith, nth;
-
- // work buffer for all threads
- size_t wsize;
- void * wdata;
-
- struct ggml_threadpool * threadpool;
-};
-
//
// fundamental operations
//
inline static void ggml_vec_set_i8(const int n, int8_t * x, const int8_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
inline static void ggml_vec_set_i16(const int n, int16_t * x, const int16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
inline static void ggml_vec_set_i32(const int n, int32_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
inline static void ggml_vec_set_f16(const int n, ggml_fp16_t * x, const int32_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
inline static void ggml_vec_set_bf16(const int n, ggml_bf16_t * x, const ggml_bf16_t v) { for (int i = 0; i < n; ++i) x[i] = v; }
-
inline static void ggml_vec_add_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i] + y[i]; }
inline static void ggml_vec_add1_f32(const int n, float * z, const float * x, const float v) { for (int i = 0; i < n; ++i) z[i] = x[i] + v; }
inline static void ggml_vec_acc_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] += x[i]; }
static struct ggml_state g_state = {0};
-static void ggml_barrier(struct ggml_threadpool * tp) {
+void ggml_barrier(struct ggml_threadpool * tp) {
int n_threads = atomic_load_explicit(&tp->n_threads_cur, memory_order_relaxed);
if (n_threads == 1) {
return;
type = (enum ggml_type)(intptr_t)src0->extra;
}
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+ if (src0->buffer && ggml_backend_amx_buft_is_amx(src0->buffer->buft)) {
+ ggml_backend_amx_mul_mat(params, dst);
+ return;
+ }
+#endif
+
enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float;
ggml_from_float_to_mat_t const from_float_to_mat = type_traits_cpu[vec_dot_type].from_float_to_mat;
} break;
case GGML_OP_MUL_MAT:
{
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+ if (node->src[0]->buffer && ggml_backend_amx_buft_is_amx(node->src[0]->buffer->buft)) {
+ cur = ggml_backend_amx_desired_wsize(node);
+ }
+#endif
const enum ggml_type vec_dot_type = type_traits_cpu[node->src[0]->type].vec_dot_type;
if (node->src[1]->type != vec_dot_type) {
- cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
+ size_t cur2 = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
+ cur = MAX(cur, cur2);
}
} break;
case GGML_OP_MUL_MAT_ID:
#include "ggml-cpu.h"
#include "ggml-cpu-aarch64.h"
#include "ggml-impl.h"
+#include "amx/amx.h"
#include <cctype>
#include <string>
#include <vector>
static std::vector<ggml_backend_buffer_type_t> bufts = []() {
std::vector<ggml_backend_buffer_type_t> bufts;
-#ifdef GGML_USE_CPU_HBM
- bufts.push_back(ggml_backend_cpu_hbm_buffer_type());
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+ if (ggml_backend_amx_buffer_type()) {
+ bufts.push_back(ggml_backend_amx_buffer_type());
+ }
#endif
#ifdef GGML_USE_CPU_AARCH64
- bufts.push_back(ggml_backend_cpu_aarch64_buffer_type());
+ if (ggml_backend_cpu_aarch64_buffer_type()) {
+ bufts.push_back(ggml_backend_cpu_aarch64_buffer_type());
+ }
#endif
bufts.push_back(NULL);
const struct ggml_tensor * src0 = op->src[0];
const struct ggml_tensor * src1 = op->src[1];
+ if (op->op == GGML_OP_NONE || op->op == GGML_OP_RESHAPE || op->op == GGML_OP_VIEW || op->op == GGML_OP_PERMUTE || op->op == GGML_OP_TRANSPOSE) {
+ return true;
+ }
+
if (src0 && src0->buffer && ggml_backend_cpu_buft_is_aarch64(src0->buffer->buft)) {
if (op->op != GGML_OP_MUL_MAT || src0->type == ggml_aarch64_get_optimal_repack_type(src0)) {
return false;
}
}
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+ if (src0 && src0->buffer && ggml_backend_amx_buft_is_amx(src0->buffer->buft)) {
+ return ggml_backend_amx_device_supports_op(op);
+ }
+ for (int i = 1; i < GGML_MAX_SRC; i++) {
+ if (op->src[i] && op->src[i]->buffer && ggml_backend_amx_buft_is_amx(op->src[i]->buffer->buft)) {
+ return false;
+ }
+ }
+#endif
+
for (int i = 1; i < GGML_MAX_SRC; i++) {
if (op->src[i] && op->src[i]->buffer && ggml_backend_cpu_buft_is_aarch64(op->src[i]->buffer->buft)) {
return false;
}
static bool ggml_backend_cpu_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
- return ggml_backend_buft_is_host(buft) || ggml_backend_cpu_buft_is_aarch64(buft);
+ bool supported = ggml_backend_buft_is_host(buft) || ggml_backend_cpu_buft_is_aarch64(buft);
+
+#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
+ supported = supported || ggml_backend_amx_buft_is_amx(buft);
+#endif
+
+ return supported;
GGML_UNUSED(dev);
}
#include "sgemm.h"
#include "ggml-impl.h"
-// hack until moved into the CPU backend
-#include "../ggml-cpu-impl.h"
+#include "ggml-cpu-impl.h"
#include "ggml-quants.h"
#ifdef _MSC_VER
extern "C" {
#endif
-#undef MIN
-#undef MAX
+#ifndef MIN
+# define MIN(a, b) ((a) < (b) ? (a) : (b))
+#endif
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#ifndef MAX
+# define MAX(a, b) ((a) > (b) ? (a) : (b))
+#endif
// required for mmap as gguf only guarantees 32-byte alignment
#define TENSOR_ALIGNMENT 32
set(TARGET vulkan-shaders-gen)
add_executable(${TARGET} vulkan-shaders-gen.cpp)
install(TARGETS ${TARGET} RUNTIME)
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
target_link_libraries(vulkan-shaders-gen PUBLIC Threads::Threads)
set(TARGET llama-vdot)
add_executable(${TARGET} vdot.cpp)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
set(TARGET llama-q8dot)
add_executable(${TARGET} q8dot.cpp)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_11)
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
)
target_include_directories(llama PUBLIC . ../include)
-target_compile_features (llama PUBLIC cxx_std_11) # don't bump
+target_compile_features (llama PUBLIC cxx_std_17) # don't bump
target_link_libraries(llama PUBLIC ggml)
}
static inline std::wstring unicode_wstring_from_utf8(const std::string & s) {
+#if defined(__clang__)
+ // disable C++17 deprecation warning for std::codecvt_utf8
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
std::wstring_convert<std::codecvt_utf8<wchar_t>> conv;
+
+#if defined(__clang__)
+# pragma clang diagnostic pop
+#endif
+
return conv.from_bytes(s);
}
data.reserve(n_vocab);
for (int i = 0; i < n_vocab; i++) {
- const float logit = 2.0f*((float)(rand())/RAND_MAX - 0.5f);
+ const float logit = 2.0f*((double)(rand())/RAND_MAX - 0.5);
data.emplace_back(llama_token_data{i, logit, 0.0f});
}