msystem: ${{matrix.sys}}
install: >-
base-devel
+ git
mingw-w64-${{matrix.env}}-toolchain
mingw-w64-${{matrix.env}}-cmake
mingw-w64-${{matrix.env}}-openblas
set(LLAMA_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files")
set(LLAMA_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files")
-# At the moment some compile definitions are placed within the ggml/src
-# directory but not exported on the `ggml` target. This could be improved by
-# determining _precisely_ which defines are necessary for the llama-config
-# package.
-#
-set(GGML_TRANSIENT_DEFINES)
-get_target_property(GGML_DIRECTORY ggml SOURCE_DIR)
-get_directory_property(GGML_DIR_DEFINES DIRECTORY ${GGML_DIRECTORY} COMPILE_DEFINITIONS)
-if (GGML_DIR_DEFINES)
- list(APPEND GGML_TRANSIENT_DEFINES ${GGML_DIR_DEFINES})
-endif()
-get_target_property(GGML_TARGET_DEFINES ggml COMPILE_DEFINITIONS)
-if (GGML_TARGET_DEFINES)
- list(APPEND GGML_TRANSIENT_DEFINES ${GGML_TARGET_DEFINES})
-endif()
-get_target_property(GGML_LINK_LIBRARIES ggml LINK_LIBRARIES)
-# all public headers
set(LLAMA_PUBLIC_HEADERS
${CMAKE_CURRENT_SOURCE_DIR}/include/llama.h
${CMAKE_CURRENT_SOURCE_DIR}/include/llama-cpp.h)
-set_target_properties(llama PROPERTIES PUBLIC_HEADER "${LLAMA_PUBLIC_HEADERS}")
+
+set_target_properties(llama
+ PROPERTIES
+ PUBLIC_HEADER "${LLAMA_PUBLIC_HEADERS}")
+
install(TARGETS llama LIBRARY PUBLIC_HEADER)
configure_package_config_file(
set(LLAMA_BUILD_NUMBER @LLAMA_BUILD_NUMBER@)
set(LLAMA_SHARED_LIB @BUILD_SHARED_LIBS@)
-set(GGML_STATIC @GGML_STATIC@)
-set(GGML_NATIVE @GGML_NATIVE@)
-set(GGML_LTO @GGML_LTO@)
-set(GGML_CCACHE @GGML_CCACHE@)
-set(GGML_AVX @GGML_AVX@)
-set(GGML_AVX2 @GGML_AVX2@)
-set(GGML_AVX512 @GGML_AVX512@)
-set(GGML_AVX512_VBMI @GGML_AVX512_VBMI@)
-set(GGML_AVX512_VNNI @GGML_AVX512_VNNI@)
-set(GGML_AVX512_BF16 @GGML_AVX512_BF16@)
-set(GGML_AMX_TILE @GGML_AMX_TILE@)
-set(GGML_AMX_INT8 @GGML_AMX_INT8@)
-set(GGML_AMX_BF16 @GGML_AMX_BF16@)
-set(GGML_FMA @GGML_FMA@)
-set(GGML_LASX @GGML_LASX@)
-set(GGML_LSX @GGML_LSX@)
-set(GGML_RVV @GGML_RVV@)
-set(GGML_SVE @GGML_SVE@)
-
-set(GGML_ACCELERATE @GGML_ACCELERATE@)
-set(GGML_OPENMP @GGML_OPENMP@)
-set(GGML_CPU_HBM @GGML_CPU_HBM@)
-set(GGML_BLAS_VENDOR @GGML_BLAS_VENDOR@)
-
-set(GGML_CUDA_FORCE_MMQ @GGML_CUDA_FORCE_MMQ@)
-set(GGML_CUDA_FORCE_CUBLAS @GGML_CUDA_FORCE_CUBLAS@)
-set(GGML_CUDA_F16 @GGML_CUDA_F16@)
-set(GGML_CUDA_PEER_MAX_BATCH_SIZE @GGML_CUDA_PEER_MAX_BATCH_SIZE@)
-set(GGML_CUDA_NO_PEER_COPY @GGML_CUDA_NO_PEER_COPY@)
-set(GGML_CUDA_NO_VMM @GGML_CUDA_NO_VMM@)
-set(GGML_CUDA_FA_ALL_QUANTS @GGML_CUDA_FA_ALL_QUANTS@)
-set(GGML_CUDA_GRAPHS @GGML_CUDA_GRAPHS@)
-
-set(GGML_HIP_UMA @GGML_HIP_UMA@)
-
-set(GGML_VULKAN_CHECK_RESULTS @GGML_VULKAN_CHECK_RESULTS@)
-set(GGML_VULKAN_DEBUG @GGML_VULKAN_DEBUG@)
-set(GGML_VULKAN_MEMORY_DEBUG @GGML_VULKAN_MEMORY_DEBUG@)
-set(GGML_VULKAN_SHADER_DEBUG_INFO @GGML_VULKAN_SHADER_DEBUG_INFO@)
-set(GGML_VULKAN_PERF @GGML_VULKAN_PERF@)
-set(GGML_VULKAN_VALIDATE @GGML_VULKAN_VALIDATE@)
-set(GGML_VULKAN_RUN_TESTS @GGML_VULKAN_RUN_TESTS@)
-
-set(GGML_METAL_USE_BF16 @GGML_METAL_USE_BF16@)
-set(GGML_METAL_NDEBUG @GGML_METAL_NDEBUG@)
-set(GGML_METAL_SHADER_DEBUG @GGML_METAL_SHADER_DEBUG@)
-set(GGML_METAL_EMBED_LIBRARY @GGML_METAL_EMBED_LIBRARY@)
-set(GGML_METAL_MACOSX_VERSION_MIN @GGML_METAL_MACOSX_VERSION_MIN@)
-set(GGML_METAL_STD @GGML_METAL_STD@)
-
-set(GGML_SYCL_F16 @GGML_SYCL_F16@)
-set(GGML_SYCL_TARGET @GGML_SYCL_TARGET@)
-set(GGML_SYCL_DEVICE_ARCH @GGML_SYCL_DEVICE_ARCH@)
-
-
@PACKAGE_INIT@
set_and_check(LLAMA_INCLUDE_DIR "@PACKAGE_LLAMA_INCLUDE_INSTALL_DIR@")
set_and_check(LLAMA_LIB_DIR "@PACKAGE_LLAMA_LIB_INSTALL_DIR@")
set_and_check(LLAMA_BIN_DIR "@PACKAGE_LLAMA_BIN_INSTALL_DIR@")
-find_package(Threads REQUIRED)
-
-set(_llama_transient_defines "@GGML_TRANSIENT_DEFINES@")
-set(_llama_link_deps "")
-set(_llama_link_opts "")
-foreach(_ggml_lib ggml ggml-base)
- string(REPLACE "-" "_" _ggml_lib_var "${_ggml_lib}_LIBRARY")
- find_library(${_ggml_lib_var} ${_ggml_lib}
- REQUIRED
- HINTS ${LLAMA_LIB_DIR}
- NO_CMAKE_FIND_ROOT_PATH
- )
- list(APPEND _llama_link_deps "${${_ggml_lib_var}}")
- message(STATUS "Found ${${_ggml_lib_var}}")
-endforeach()
-
-foreach(backend amx blas cann cpu cuda hip kompute metal musa rpc sycl vulkan)
- string(TOUPPER "GGML_${backend}" backend_id)
- set(_ggml_lib "ggml-${backend}")
- string(REPLACE "-" "_" _ggml_lib_var "${_ggml_lib}_LIBRARY")
-
- find_library(${_ggml_lib_var} ${_ggml_lib}
- HINTS ${LLAMA_LIB_DIR}
- NO_CMAKE_FIND_ROOT_PATH
- )
- if(${_ggml_lib_var})
- list(APPEND _llama_link_deps "${${_ggml_lib_var}}")
- set(${backend_id} ON)
- message(STATUS "Found backend ${${_ggml_lib_var}}")
- else()
- set(${backend_id} OFF)
- endif()
-endforeach()
-
-if (NOT LLAMA_SHARED_LIB)
- if (APPLE AND GGML_ACCELERATE)
- find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED)
- list(APPEND _llama_link_deps ${ACCELERATE_FRAMEWORK})
- endif()
-
- if (GGML_OPENMP)
- find_package(OpenMP REQUIRED)
- list(APPEND _llama_link_deps OpenMP::OpenMP_C OpenMP::OpenMP_CXX)
- endif()
-
- if (GGML_CPU_HBM)
- find_library(memkind memkind REQUIRED)
- list(APPEND _llama_link_deps memkind)
- endif()
-
- if (GGML_BLAS)
- find_package(BLAS REQUIRED)
- list(APPEND _llama_link_deps ${BLAS_LIBRARIES})
- list(APPEND _llama_link_opts ${BLAS_LINKER_FLAGS})
- endif()
-
- if (GGML_CUDA)
- find_package(CUDAToolkit REQUIRED)
- endif()
-
- if (GGML_METAL)
- find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
- find_library(METAL_FRAMEWORK Metal REQUIRED)
- find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
- list(APPEND _llama_link_deps ${FOUNDATION_LIBRARY}
- ${METAL_FRAMEWORK} ${METALKIT_FRAMEWORK})
- endif()
-
- if (GGML_VULKAN)
- find_package(Vulkan REQUIRED)
- list(APPEND _llama_link_deps Vulkan::Vulkan)
- endif()
-
- if (GGML_HIP)
- find_package(hip REQUIRED)
- find_package(hipblas REQUIRED)
- find_package(rocblas REQUIRED)
- list(APPEND _llama_link_deps hip::host roc::rocblas roc::hipblas)
- endif()
-
- if (GGML_SYCL)
- find_package(DNNL)
- if (${DNNL_FOUND} AND GGML_SYCL_TARGET STREQUAL "INTEL")
- list(APPEND _llama_link_deps DNNL::dnnl)
- endif()
- if (WIN32)
- find_package(IntelSYCL REQUIRED)
- find_package(MKL REQUIRED)
- list(APPEND _llama_link_deps IntelSYCL::SYCL_CXX MKL::MKL MKL::MKL_SYCL)
- endif()
- endif()
-endif()
+find_package(ggml REQUIRED)
find_library(llama_LIBRARY llama
REQUIRED
set_target_properties(llama
PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${LLAMA_INCLUDE_DIR}"
- INTERFACE_LINK_LIBRARIES "${_llama_link_deps}"
- INTERFACE_LINK_OPTIONS "${_llama_link_opts}"
- INTERFACE_COMPILE_DEFINITIONS "${_llama_transient_defines}"
+ INTERFACE_LINK_LIBRARIES "ggml::ggml;ggml::ggml-base"
IMPORTED_LINK_INTERFACE_LANGUAGES "CXX"
IMPORTED_LOCATION "${llama_LIBRARY}"
- INTERFACE_COMPILE_FEATURES cxx_std_11
- POSITION_INDEPENDENT_CODE ON )
+ INTERFACE_COMPILE_FEATURES c_std_90
+ POSITION_INDEPENDENT_CODE ON)
check_required_components(Llama)
+++ /dev/null
-# Prerequisites
-*.d
-
-# Compiled Object files
-*.slo
-*.lo
-*.o
-*.obj
-
-# Precompiled Headers
-*.gch
-*.pch
-
-# Compiled Dynamic libraries
-*.so
-*.dylib
-*.dll
-
-# Fortran module files
-*.mod
-*.smod
-
-# Compiled Static libraries
-*.lai
-*.la
-*.a
-*.lib
-
-# Executables
-*.exe
-*.out
-*.app
-
-*.gguf
-
-*.log
-.DS_Store
-.build/
-.cache/
-.direnv/
-.envrc
-.swiftpm
-.venv
-.clang-tidy
-.vs/
-.vscode/
-
-build*/
-out/
-tmp/
+++ /dev/null
-cmake_minimum_required(VERSION 3.12)
-project("llama-cli-cmake-pkg" C CXX)
-set(TARGET llama-cli-cmake-pkg)
-
-find_package(Llama 0.0.1 REQUIRED)
-
-# Bake common functionality in with target. Because applications
-# using the relocatable Llama package should be outside of the
-# source tree, llama-cli-cmake-pkg pretends the dependencies are built-in.
-set(_common_path "${CMAKE_CURRENT_LIST_DIR}/../../common")
-add_library(common OBJECT)
-file(GLOB _common_files
- "${_common_path}/*.h"
- "${_common_path}/*.cpp"
-)
-target_sources(common PRIVATE ${_common_files})
-
-# If the common project was part of "llama-cli-cmake-pkg" the transient
-# defines would automatically be attached. Because the common func-
-# tionality is separate, but dependent upon the defines, it must be
-# explicitly extracted from the "llama" target.
-#
-get_target_property(_llama_transient_defines llama
- INTERFACE_COMPILE_DEFINITIONS)
-
-target_compile_definitions(common PRIVATE "${_llama_transient_defines}")
-
-add_executable(${TARGET} ${CMAKE_CURRENT_LIST_DIR}/../main/main.cpp)
-target_include_directories(${TARGET} PRIVATE ${_common_path})
-install(TARGETS ${TARGET} RUNTIME)
-target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
-target_compile_features(${TARGET} PRIVATE cxx_std_17)
+++ /dev/null
-# llama.cpp/example/main-cmake-pkg
-
-This program builds [llama-cli](../main) using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggerganov/llama.cpp) in projects which live outside of the source tree.
-
-## Building
-
-Because this example is "outside of the source tree", it is important to first build/install llama.cpp using CMake. An example is provided here, but please see the [llama.cpp build instructions](../..) for more detailed build instructions.
-
-### Considerations
-
-When hardware acceleration libraries are used (e.g. CUDA, Metal, etc.), CMake must be able to locate the associated CMake package.
-
-### Build llama.cpp and install to C:\LlamaCPP directory
-
-```cmd
-git clone https://github.com/ggerganov/llama.cpp
-cd llama.cpp
-cmake -B build -DBUILD_SHARED_LIBS=OFF -G "Visual Studio 17 2022" -A x64
-cmake --build build --config Release
-cmake --install build --prefix C:/LlamaCPP
-```
-
-### Build llama-cli-cmake-pkg
-
-
-```cmd
-cd ..\examples\main-cmake-pkg
-cmake -B build -DBUILD_SHARED_LIBS=OFF -DCMAKE_PREFIX_PATH="C:/LlamaCPP/lib/cmake/Llama" -G "Visual Studio 17 2022" -A x64
-cmake --build build --config Release
-cmake --install build --prefix C:/MyLlamaApp
-```
--- /dev/null
+# Prerequisites
+*.d
+
+# Compiled Object files
+*.slo
+*.lo
+*.o
+*.obj
+
+# Precompiled Headers
+*.gch
+*.pch
+
+# Compiled Dynamic libraries
+*.so
+*.dylib
+*.dll
+
+# Fortran module files
+*.mod
+*.smod
+
+# Compiled Static libraries
+*.lai
+*.la
+*.a
+*.lib
+
+# Executables
+*.exe
+*.out
+*.app
+
+*.gguf
+
+*.log
+.DS_Store
+.build/
+.cache/
+.direnv/
+.envrc
+.swiftpm
+.venv
+.clang-tidy
+.vs/
+.vscode/
+
+build*/
+out/
+tmp/
--- /dev/null
+cmake_minimum_required(VERSION 3.12)
+project(llama-simple-cmake-pkg)
+
+set(TARGET llama-simple-cmake-pkg)
+
+find_package(Llama REQUIRED)
+
+add_executable(${TARGET} ${CMAKE_CURRENT_LIST_DIR}/../simple/simple.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE llama ggml::all ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
--- /dev/null
+# llama.cpp/example/simple-cmake-pkg
+
+This program builds [simple](../simple) using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggerganov/llama.cpp) in projects which live outside of the source tree.
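+
+The essential pattern, condensed from this example's `CMakeLists.txt`, looks like the following minimal sketch (`my-app` and `main.cpp` are placeholders for your own target and sources):
+
+```cmake
+cmake_minimum_required(VERSION 3.12)
+project(my-app)
+
+# Locate the installed, relocatable Llama package.
+find_package(Llama REQUIRED)
+
+add_executable(my-app main.cpp)
+
+# llama pulls in ggml::ggml and ggml::ggml-base; ggml::all additionally
+# links every ggml backend that was installed alongside the package.
+target_link_libraries(my-app PRIVATE llama ggml::all ${CMAKE_THREAD_LIBS_INIT})
+```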
+
+## Building
+
+Because this example lives "outside of the source tree", llama.cpp must first be built and installed using CMake. A minimal build is shown below; please see the [llama.cpp build instructions](../..) for more detail.
+
+### Considerations
+
+When hardware acceleration libraries are used (e.g. CUDA, Metal, Vulkan, etc.), the appropriate dependencies will be searched for automatically. So, for example, when the installed package was built with CUDA support, CMake must still be able to locate the CUDA Toolkit on the machine consuming the package.
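+
+Since the installed ggml package creates one imported target per installed backend (plus the `ggml::all` aggregate), a consumer can also probe for a specific backend after `find_package()`. A minimal sketch, assuming a package that may or may not have been built with CUDA:
+
+```cmake
+find_package(Llama REQUIRED)
+
+# Backend targets such as ggml::ggml-cuda exist only when the installed
+# package was built with that backend enabled.
+if(TARGET ggml::ggml-cuda)
+    message(STATUS "Installed llama.cpp package provides the CUDA backend")
+else()
+    message(STATUS "Installed llama.cpp package does not provide the CUDA backend")
+endif()
+```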
+
+### Build llama.cpp and install to llama.cpp/inst
+
+```sh
+git clone https://github.com/ggerganov/llama.cpp
+cd llama.cpp
+cmake -S . -B build
+cmake --build build
+cmake --install build --prefix inst
+```
+
+### Build simple-cmake-pkg
+
+```sh
+cd examples/simple-cmake-pkg
+cmake -S . -B build -DCMAKE_PREFIX_PATH=../../inst/lib/cmake
+cmake --build build
+```
+
+### Run simple-cmake-pkg
+
+```sh
+./build/llama-simple-cmake-pkg -m ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is"
+```
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/ggml.pc
DESTINATION share/pkgconfig)
endif()
+
+#
+# Create CMake package
+#
+
+# Generate version info based on git commit.
+
+find_program(GIT_EXE NAMES git git.exe REQUIRED NO_CMAKE_FIND_ROOT_PATH)
+execute_process(COMMAND ${GIT_EXE} rev-list --count HEAD
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+ OUTPUT_VARIABLE GGML_BUILD_NUMBER
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+)
+
+if(GGML_BUILD_NUMBER EQUAL 1)
+ message(WARNING "GGML build number fixed at 1, likely due to a shallow clone.")
+endif()
+
+execute_process(COMMAND ${GIT_EXE} rev-parse --short HEAD
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+ OUTPUT_VARIABLE GGML_BUILD_COMMIT
+ OUTPUT_STRIP_TRAILING_WHITESPACE
+)
+
+# Capture variables prefixed with GGML_.
+
+set(variable_set_statements
+"
+####### Expanded from @GGML_VARIABLES_EXPANDED@ by configure_package_config_file() #######
+####### Any changes to this file will be overwritten by the next CMake run #######
+
+")
+
+set(GGML_SHARED_LIB ${BUILD_SHARED_LIBS})
+
+get_cmake_property(all_variables VARIABLES)
+foreach(variable_name IN LISTS all_variables)
+ if(variable_name MATCHES "^GGML_")
+ string(REPLACE ";" "\\;"
+ variable_value "${${variable_name}}")
+
+ set(variable_set_statements
+ "${variable_set_statements}set(${variable_name} \"${variable_value}\")\n")
+ endif()
+endforeach()
+
+set(GGML_VARIABLES_EXPANDED ${variable_set_statements})
+
+# Create the CMake package and set install location.
+
+set(GGML_INSTALL_VERSION 0.0.${GGML_BUILD_NUMBER})
+set(GGML_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files")
+set(GGML_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files")
+set(GGML_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files")
+
+configure_package_config_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/cmake/ggml-config.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/ggml-config.cmake
+ INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/ggml
+ PATH_VARS GGML_INCLUDE_INSTALL_DIR
+ GGML_LIB_INSTALL_DIR
+ GGML_BIN_INSTALL_DIR)
+
+write_basic_package_version_file(
+ ${CMAKE_CURRENT_BINARY_DIR}/ggml-version.cmake
+ VERSION ${GGML_INSTALL_VERSION}
+ COMPATIBILITY SameMajorVersion)
+
+install(FILES ${CMAKE_CURRENT_BINARY_DIR}/ggml-config.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/ggml-version.cmake
+ DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/ggml)
--- /dev/null
+
+@GGML_VARIABLES_EXPANDED@
+
+@PACKAGE_INIT@
+
+set_and_check(GGML_INCLUDE_DIR "@PACKAGE_GGML_INCLUDE_INSTALL_DIR@")
+set_and_check(GGML_LIB_DIR "@PACKAGE_GGML_LIB_INSTALL_DIR@")
+set_and_check(GGML_BIN_DIR "@PACKAGE_GGML_BIN_INSTALL_DIR@")
+
+find_package(Threads REQUIRED)
+
+find_library(GGML_LIBRARY ggml
+ REQUIRED
+ HINTS ${GGML_LIB_DIR}
+ NO_CMAKE_FIND_ROOT_PATH)
+
+add_library(ggml::ggml UNKNOWN IMPORTED)
+set_target_properties(ggml::ggml
+ PROPERTIES
+ IMPORTED_LOCATION "${GGML_LIBRARY}")
+
+find_library(GGML_BASE_LIBRARY ggml-base
+ REQUIRED
+ HINTS ${GGML_LIB_DIR}
+ NO_CMAKE_FIND_ROOT_PATH)
+
+add_library(ggml::ggml-base UNKNOWN IMPORTED)
+set_target_properties(ggml::ggml-base
+ PROPERTIES
+ IMPORTED_LOCATION "${GGML_BASE_LIBRARY}")
+
+if (NOT GGML_SHARED_LIB)
+ if (APPLE AND GGML_ACCELERATE)
+ find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED)
+ list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES ${ACCELERATE_FRAMEWORK})
+ endif()
+
+ if (GGML_OPENMP)
+ find_package(OpenMP REQUIRED)
+ list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES OpenMP::OpenMP_C OpenMP::OpenMP_CXX)
+ endif()
+
+ if (GGML_CPU_HBM)
+ find_library(memkind memkind REQUIRED)
+ list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES memkind)
+ endif()
+
+ if (GGML_BLAS)
+ find_package(BLAS REQUIRED)
+ list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES ${BLAS_LIBRARIES})
+ list(APPEND GGML_CPU_INTERFACE_LINK_OPTIONS ${BLAS_LINKER_FLAGS})
+ endif()
+
+ if (GGML_CUDA)
+ find_package(CUDAToolkit REQUIRED)
+ endif()
+
+ if (GGML_METAL)
+ find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
+ find_library(METAL_FRAMEWORK Metal REQUIRED)
+ find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
+
+ list(APPEND GGML_METAL_INTERFACE_LINK_LIBRARIES
+ ${FOUNDATION_LIBRARY} ${METAL_FRAMEWORK} ${METALKIT_FRAMEWORK})
+ endif()
+
+ if (GGML_VULKAN)
+ find_package(Vulkan REQUIRED)
+ list(APPEND GGML_VULKAN_INTERFACE_LINK_LIBRARIES Vulkan::Vulkan)
+ endif()
+
+ if (GGML_HIP)
+ find_package(hip REQUIRED)
+ find_package(hipblas REQUIRED)
+ find_package(rocblas REQUIRED)
+ list(APPEND GGML_HIP_INTERFACE_LINK_LIBRARIES hip::host roc::rocblas roc::hipblas)
+ endif()
+
+ if (GGML_SYCL)
+ find_package(DNNL)
+ if (${DNNL_FOUND} AND GGML_SYCL_TARGET STREQUAL "INTEL")
+ list(APPEND GGML_SYCL_INTERFACE_LINK_LIBRARIES DNNL::dnnl)
+ endif()
+ if (WIN32)
+ find_package(IntelSYCL REQUIRED)
+ find_package(MKL REQUIRED)
+ list(APPEND GGML_SYCL_INTERFACE_LINK_LIBRARIES IntelSYCL::SYCL_CXX MKL::MKL MKL::MKL_SYCL)
+ endif()
+ endif()
+endif()
+
+set(_ggml_all_targets "")
+foreach(_ggml_backend ${GGML_AVAILABLE_BACKENDS})
+ string(REPLACE "-" "_" _ggml_backend_pfx "${_ggml_backend}")
+ string(TOUPPER "${_ggml_backend_pfx}" _ggml_backend_pfx)
+
+ find_library(${_ggml_backend_pfx}_LIBRARY ${_ggml_backend}
+ REQUIRED
+ HINTS ${GGML_LIB_DIR}
+ NO_CMAKE_FIND_ROOT_PATH)
+
+ message(STATUS "Found ${${_ggml_backend_pfx}_LIBRARY}")
+
+ add_library(ggml::${_ggml_backend} UNKNOWN IMPORTED)
+ set_target_properties(ggml::${_ggml_backend}
+ PROPERTIES
+ INTERFACE_INCLUDE_DIRECTORIES "${GGML_INCLUDE_DIR}"
+ IMPORTED_LINK_INTERFACE_LANGUAGES "CXX"
+ IMPORTED_LOCATION "${${_ggml_backend_pfx}_LIBRARY}"
+ INTERFACE_COMPILE_FEATURES c_std_90
+ POSITION_INDEPENDENT_CODE ON)
+
+ string(REGEX MATCH "^ggml-cpu" is_cpu_variant "${_ggml_backend}")
+ if(is_cpu_variant)
+ list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES "ggml::ggml" "ggml::ggml-base")
+ set_target_properties(ggml::${_ggml_backend}
+ PROPERTIES
+ INTERFACE_LINK_LIBRARIES "${GGML_CPU_INTERFACE_LINK_LIBRARIES}")
+
+ if(GGML_CPU_INTERFACE_LINK_OPTIONS)
+ set_target_properties(ggml::${_ggml_backend}
+ PROPERTIES
+ INTERFACE_LINK_OPTIONS "${GGML_CPU_INTERFACE_LINK_OPTIONS}")
+ endif()
+
+ else()
+ list(APPEND ${_ggml_backend_pfx}_INTERFACE_LINK_LIBRARIES "ggml::ggml" "ggml::ggml-base")
+ set_target_properties(ggml::${_ggml_backend}
+ PROPERTIES
+ INTERFACE_LINK_LIBRARIES "${${_ggml_backend_pfx}_INTERFACE_LINK_LIBRARIES}")
+
+ if(${_ggml_backend_pfx}_INTERFACE_LINK_OPTIONS)
+ set_target_properties(ggml::${_ggml_backend}
+ PROPERTIES
+ INTERFACE_LINK_OPTIONS "${${_ggml_backend_pfx}_INTERFACE_LINK_OPTIONS}")
+ endif()
+ endif()
+
+ list(APPEND _ggml_all_targets ggml::${_ggml_backend})
+endforeach()
+
+add_library(ggml::all INTERFACE IMPORTED)
+set_target_properties(ggml::all
+ PROPERTIES
+ INTERFACE_LINK_LIBRARIES "${_ggml_all_targets}")
+
+check_required_components(ggml)
target_compile_definitions(${backend} PRIVATE GGML_BACKEND_BUILD)
target_compile_definitions(${backend} PUBLIC GGML_BACKEND_SHARED)
endif()
+
+ if(NOT GGML_AVAILABLE_BACKENDS)
+ set(GGML_AVAILABLE_BACKENDS "${backend}"
+ CACHE INTERNAL "List of backends for cmake package")
+ else()
+ list(FIND GGML_AVAILABLE_BACKENDS "${backend}" has_backend)
+ if(has_backend EQUAL -1)
+ set(GGML_AVAILABLE_BACKENDS "${GGML_AVAILABLE_BACKENDS};${backend}"
+ CACHE INTERNAL "List of backends for cmake package")
+ endif()
+ endif()
endfunction()
function(ggml_add_backend backend)