endif()
endif()
endfunction()
-
-function(llama_download_model NAME HASH)
- set(DEST "${CMAKE_BINARY_DIR}/${NAME}")
- get_filename_component(DEST_DIR "${DEST}" DIRECTORY)
- file(MAKE_DIRECTORY "${DEST_DIR}")
- if(NOT EXISTS "${DEST}")
- message(STATUS "Downloading ${NAME} from ggml-org/models...")
- endif()
- file(DOWNLOAD
- "https://huggingface.co/ggml-org/models/resolve/main/${NAME}?download=true"
- "${DEST}"
- TLS_VERIFY ON
- EXPECTED_HASH ${HASH}
- STATUS status
- )
- list(GET status 0 code)
- if(NOT code EQUAL 0)
- list(GET status 1 msg)
- message(FATAL_ERROR "Failed to download ${NAME}: ${msg}")
- endif()
- set(LLAMA_DOWNLOAD_MODEL "${DEST}" PARENT_SCOPE)
-endfunction()
--- /dev/null
+# download-models.cmake — fetch a test model from the ggml-org/models repo on
+# Hugging Face and verify its hash. Run in script mode:
+#   cmake -DDEST=<output path> -DNAME=<repo-relative name> \
+#         -DHASH=SHA256=<digest> -P cmake/download-models.cmake
+
+# Fail fast when a required -D argument is missing instead of silently
+# downloading to an empty destination with no hash check.
+foreach(_required IN ITEMS DEST NAME HASH)
+  if(NOT DEFINED ${_required} OR "${${_required}}" STREQUAL "")
+    message(FATAL_ERROR "download-models.cmake: -D${_required}=... is required")
+  endif()
+endforeach()
+
+get_filename_component(DEST_DIR "${DEST}" DIRECTORY)
+file(MAKE_DIRECTORY "${DEST_DIR}")
+
+if(NOT EXISTS "${DEST}")
+ message(STATUS "Downloading ${NAME} from ggml-org/models...")
+endif()
+
+# EXPECTED_HASH makes the download a no-op when DEST already exists with the
+# right digest, and fails the download on a mismatch.
+file(DOWNLOAD
+ "https://huggingface.co/ggml-org/models/resolve/main/${NAME}?download=true"
+ "${DEST}"
+ TLS_VERIFY ON
+ EXPECTED_HASH ${HASH}
+ STATUS status
+)
+
+list(GET status 0 code)
+
+if(NOT code EQUAL 0)
+ list(GET status 1 msg)
+ message(FATAL_ERROR "Failed to download ${NAME}: ${msg}")
+endif()
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_17)
-set(TEST_TARGET test-eval-callback)
-if(NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
- llama_download_model("tinyllamas/stories15M-q4_0.gguf" SHA256=66967fbece6dbe97886593fdbb73589584927e29119ec31f08090732d1861739)
-else()
- llama_download_model("tinyllamas/stories15M-be.Q4_0.gguf" SHA256=9aec857937849d976f30397e97eb1cabb53eb9dcb1ce4611ba8247fb5f44c65d)
+# Register the model download as a CTest fixture so the file is fetched lazily
+# at test time instead of at configure time.
+if(LLAMA_BUILD_TESTS)
+  # Use the bare variable name: if() dereferences it, and the unquoted
+  # ${...} form is a hard error when the variable is empty.
+  if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "s390x")
+    set(MODEL_NAME "tinyllamas/stories15M-q4_0.gguf")
+    set(MODEL_HASH "SHA256=66967fbece6dbe97886593fdbb73589584927e29119ec31f08090732d1861739")
+  else()
+    # s390x is big-endian and needs the byte-swapped GGUF variant.
+    set(MODEL_NAME "tinyllamas/stories15M-be.Q4_0.gguf")
+    set(MODEL_HASH "SHA256=9aec857937849d976f30397e97eb1cabb53eb9dcb1ce4611ba8247fb5f44c65d")
+  endif()
+  set(MODEL_DEST "${CMAKE_BINARY_DIR}/${MODEL_NAME}")
+  set(TEST_TARGET test-eval-callback)
+  # Quote the -D/-P arguments — the paths may contain spaces — and use
+  # PROJECT_SOURCE_DIR so the script resolves when llama.cpp is a subproject.
+  add_test(NAME ${TEST_TARGET}-download-model COMMAND ${CMAKE_COMMAND}
+    "-DDEST=${MODEL_DEST}"
+    "-DNAME=${MODEL_NAME}"
+    "-DHASH=${MODEL_HASH}"
+    -P "${PROJECT_SOURCE_DIR}/cmake/download-models.cmake"
+  )
+  set_tests_properties(${TEST_TARGET}-download-model PROPERTIES FIXTURES_SETUP ${TEST_TARGET}-download-model)
+  add_test(NAME ${TEST_TARGET} COMMAND llama-eval-callback -m "${MODEL_DEST}" --prompt hello --seed 42 -ngl 0)
+  set_tests_properties(${TEST_TARGET} PROPERTIES FIXTURES_REQUIRED ${TEST_TARGET}-download-model)
endif()
-add_test(NAME ${TEST_TARGET} COMMAND llama-eval-callback -m "${LLAMA_DOWNLOAD_MODEL}" --prompt hello --seed 42 -ngl 0)
llama_build_and_test(test-regex-partial.cpp)
if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
- llama_download_model("tinyllamas/stories15M-q4_0.gguf" SHA256=66967fbece6dbe97886593fdbb73589584927e29119ec31f08090732d1861739)
+ set(MODEL_NAME "tinyllamas/stories15M-q4_0.gguf")
+ set(MODEL_HASH "SHA256=66967fbece6dbe97886593fdbb73589584927e29119ec31f08090732d1861739")
else()
+ # s390x is big-endian and needs the byte-swapped GGUF variant.
- llama_download_model("tinyllamas/stories15M-be.Q4_0.gguf" SHA256=9aec857937849d976f30397e97eb1cabb53eb9dcb1ce4611ba8247fb5f44c65d)
+ set(MODEL_NAME "tinyllamas/stories15M-be.Q4_0.gguf")
+ set(MODEL_HASH "SHA256=9aec857937849d976f30397e97eb1cabb53eb9dcb1ce4611ba8247fb5f44c65d")
endif()
-llama_build_and_test(test-thread-safety.cpp ARGS -m "${LLAMA_DOWNLOAD_MODEL}" -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4 -t 2)
+set(MODEL_DEST "${CMAKE_BINARY_DIR}/${MODEL_NAME}")
+
+# The download runs once as a shared fixture for every test that needs the
+# model. Quote the -D/-P arguments — the paths may contain spaces — and use
+# PROJECT_SOURCE_DIR so the script resolves when llama.cpp is a subproject.
+add_test(NAME test-download-model COMMAND ${CMAKE_COMMAND}
+  "-DDEST=${MODEL_DEST}"
+  "-DNAME=${MODEL_NAME}"
+  "-DHASH=${MODEL_HASH}"
+  -P "${PROJECT_SOURCE_DIR}/cmake/download-models.cmake"
+)
+set_tests_properties(test-download-model PROPERTIES FIXTURES_SETUP test-download-model)
+
+llama_build_and_test(test-thread-safety.cpp ARGS -m "${MODEL_DEST}" -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4 -t 2)
+set_tests_properties(test-thread-safety PROPERTIES FIXTURES_REQUIRED test-download-model)
llama_build_and_test(test-arg-parser.cpp)
# Test for state restore with fragmented KV cache
# Requires a model, uses same args pattern as test-thread-safety
-llama_build_and_test(test-state-restore-fragmented.cpp LABEL "model" ARGS -m "${LLAMA_DOWNLOAD_MODEL}")
+llama_build_and_test(test-state-restore-fragmented.cpp LABEL "model" ARGS -m "${MODEL_DEST}")
+set_tests_properties(test-state-restore-fragmented PROPERTIES FIXTURES_REQUIRED test-download-model)
if (NOT GGML_BACKEND_DL)
# these tests use the backends directly and cannot be built with dynamic loading