id: cmake_test
run: |
cd build
- ctest -L 'main|curl' --verbose --timeout 900
+ ctest -L main --verbose --timeout 900
macOS-latest-cmake-x64:
runs-on: macos-15-intel
id: cmake_test
run: |
cd build
- ctest -L 'main|curl' --verbose --timeout 900
+ ctest -L main --verbose --timeout 900
- name: Test llama2c conversion
id: llama2c_test
id: depends
run: |
sudo apt-get update
- sudo apt-get install build-essential libcurl4-openssl-dev
+ sudo apt-get install build-essential
- name: Test
id: ggml-ci
id: depends
run: |
sudo apt-get update
- sudo apt-get install build-essential libcurl4-openssl-dev
+ sudo apt-get install build-essential
- name: Test
id: ggml-ci
id: depends
run: |
sudo apt-get update
- sudo apt-get install build-essential libcurl4-openssl-dev
+ sudo apt-get install build-essential
- name: Test
id: ggml-ci
id: depends
run: |
sudo apt-get update
- sudo apt-get install build-essential libcurl4-openssl-dev
+ sudo apt-get install build-essential
- name: Test
id: ggml-ci
id: depends
run: |
sudo apt-get update
- sudo apt-get install build-essential libcurl4-openssl-dev
+ sudo apt-get install build-essential
- name: Test
id: ggml-ci
id: depends
run: |
sudo apt-get update
- sudo apt-get install -y build-essential libcurl4-openssl-dev
+ sudo apt-get install -y build-essential
- name: Test
id: ggml-ci
id: cmake_test
run: |
cd build
- ctest -L 'main|curl' --verbose --timeout 900
+ ctest -L main --verbose --timeout 900
- name: Test llama2c conversion
id: llama2c_test
sudo DEBIAN_FRONTEND=noninteractive NEEDRESTART_MODE=a \
apt-get install -y \
build-essential \
- libcurl4-openssl-dev \
python3-venv \
gpg \
wget \
cd $sd/../
SRC=`pwd`
-CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=${LLAMA_FATAL_WARNINGS:-ON} -DLLAMA_CURL=ON -DGGML_SCHED_NO_REALLOC=ON"
+CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=${LLAMA_FATAL_WARNINGS:-ON} -DLLAMA_CURL=OFF -DGGML_SCHED_NO_REALLOC=ON"
if [ ! -z ${GG_BUILD_METAL} ]; then
CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON"
endif()
endif()
endfunction()
+
+# llama_download_model(NAME HASH)
+#   Download model NAME from the ggml-org/models Hugging Face repository into
+#   the build tree, verifying it against HASH (format: SHA256=<hex digest>).
+#   On success the local path is returned to the caller in the variable
+#   LLAMA_DOWNLOAD_MODEL (set in the caller's scope via PARENT_SCOPE).
+#   On failure the partial download is removed and configuration aborts.
+function(llama_download_model NAME HASH)
+    set(DEST "${CMAKE_BINARY_DIR}/${NAME}")
+    get_filename_component(DEST_DIR "${DEST}" DIRECTORY)
+    file(MAKE_DIRECTORY "${DEST_DIR}")
+    if(NOT EXISTS "${DEST}")
+        message(STATUS "Downloading ${NAME} from ggml-org/models...")
+    endif()
+    # file(DOWNLOAD) with EXPECTED_HASH skips the transfer when DEST already
+    # exists with a matching hash, so re-running configure is cheap.
+    file(DOWNLOAD
+        "https://huggingface.co/ggml-org/models/resolve/main/${NAME}?download=true"
+        "${DEST}"
+        TLS_VERIFY ON
+        EXPECTED_HASH ${HASH}
+        STATUS status
+    )
+    list(GET status 0 code)
+    if(NOT code EQUAL 0)
+        list(GET status 1 msg)
+        # Remove the partial/corrupt file: because STATUS captures the error,
+        # a truncated or hash-mismatched download would otherwise be left on
+        # disk at DEST. Deleting it guarantees a clean retry next configure.
+        file(REMOVE "${DEST}")
+        message(FATAL_ERROR "Failed to download ${NAME}: ${msg}")
+    endif()
+    set(LLAMA_DOWNLOAD_MODEL "${DEST}" PARENT_SCOPE)
+endfunction()
set(TEST_TARGET test-eval-callback)
if(NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
- add_test(NAME ${TEST_TARGET}
- COMMAND llama-eval-callback --hf-repo ggml-org/models --hf-file tinyllamas/stories260K.gguf --model stories260K.gguf --prompt hello --seed 42 -ngl 0)
+ llama_download_model("tinyllamas/stories15M-q4_0.gguf" SHA256=66967fbece6dbe97886593fdbb73589584927e29119ec31f08090732d1861739)
else()
- add_test(NAME ${TEST_TARGET}
- COMMAND llama-eval-callback --hf-repo ggml-org/models --hf-file tinyllamas/stories260K-be.gguf --model stories260K-be.gguf --prompt hello --seed 42 -ngl 0)
+ llama_download_model("tinyllamas/stories15M-be.Q4_0.gguf" SHA256=9aec857937849d976f30397e97eb1cabb53eb9dcb1ce4611ba8247fb5f44c65d)
endif()
-set_property(TEST ${TEST_TARGET} PROPERTY LABELS eval-callback curl)
+add_test(NAME ${TEST_TARGET} COMMAND llama-eval-callback -m "${LLAMA_DOWNLOAD_MODEL}" --prompt hello --seed 42 -ngl 0)
llama_build_and_test(test-regex-partial.cpp)
if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
- llama_build_and_test(test-thread-safety.cpp ARGS -hf ggml-org/models -hff tinyllamas/stories15M-q4_0.gguf -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4 -t 2)
+ llama_download_model("tinyllamas/stories15M-q4_0.gguf" SHA256=66967fbece6dbe97886593fdbb73589584927e29119ec31f08090732d1861739)
else()
- llama_build_and_test(test-thread-safety.cpp ARGS -hf ggml-org/models -hff tinyllamas/stories15M-be.Q4_0.gguf -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4 -t 2)
+ llama_download_model("tinyllamas/stories15M-be.Q4_0.gguf" SHA256=9aec857937849d976f30397e97eb1cabb53eb9dcb1ce4611ba8247fb5f44c65d)
endif()
+llama_build_and_test(test-thread-safety.cpp ARGS -m "${LLAMA_DOWNLOAD_MODEL}" -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4 -t 2)
-# this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135)
-if (NOT WIN32)
- llama_build_and_test(test-arg-parser.cpp)
-endif()
+llama_build_and_test(test-arg-parser.cpp)
if (NOT LLAMA_SANITIZE_ADDRESS AND NOT GGML_SCHED_NO_REALLOC)
# TODO: repair known memory leaks
# Test for state restore with fragmented KV cache
# Requires a model, uses same args pattern as test-thread-safety
-if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
- llama_build_and_test(test-state-restore-fragmented.cpp LABEL "model" ARGS -hf ggml-org/models -hff tinyllamas/stories15M-q4_0.gguf)
-else()
- llama_build_and_test(test-state-restore-fragmented.cpp LABEL "model" ARGS -hf ggml-org/models -hff tinyllamas/stories15M-be.Q4_0.gguf)
-endif()
+llama_build_and_test(test-state-restore-fragmented.cpp LABEL "model" ARGS -m "${LLAMA_DOWNLOAD_MODEL}")
if (NOT GGML_BACKEND_DL)
# these tests use the backends directly and cannot be built with dynamic loading
assert(params.cpuparams.n_threads == 1010);
#endif // _WIN32
- printf("test-arg-parser: test curl-related functions\n\n");
+ printf("test-arg-parser: test download functions\n\n");
const char * GOOD_URL = "http://ggml.ai/";
const char * BAD_URL = "http://ggml.ai/404";