From: Georgi Gerganov
Date: Mon, 17 Jul 2023 13:35:06 +0000 (+0300)
Subject: ci : use models-mnt
X-Git-Tag: upstream/0.0.1642~1310
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=133f7d578d7c0614c70946a849e4ebf1bdeec975;p=pkg%2Fggml%2Fsources%2Fggml

ci : use models-mnt
---

diff --git a/ci/run.sh b/ci/run.sh
index a3aaf688..c23d8d39 100644
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -4,7 +4,8 @@
 sd=`dirname $0`
 cd $sd/../
 SRC=`pwd`
 
-OUT=$1
+OUT="$1"
+MNT="$2"
 
 ## helpers
 
@@ -112,13 +113,13 @@ function gg_sum_ctest_release {
 function gg_run_gpt_2 {
     cd ${SRC}
 
-    gg_wget models/gpt-2 https://huggingface.co/ggerganov/ggml/resolve/main/ggml-model-gpt-2-117M.bin
+    gg_wget models-mnt/gpt-2 https://huggingface.co/ggerganov/ggml/resolve/main/ggml-model-gpt-2-117M.bin
 
     cd build-ci-release
 
     set -e
 
-    model="../models/gpt-2/ggml-model-gpt-2-117M.bin"
+    model="../models-mnt/gpt-2/ggml-model-gpt-2-117M.bin"
     prompts="../examples/prompts/gpt-2.txt"
 
     (time ./bin/gpt-2 --model ${model} -s 1234 -n 64 -t 4 -tt ${prompts} ) 2>&1 | tee -a $OUT/${ci}-tg.log
@@ -142,19 +143,19 @@ function gg_sum_gpt_2 {
 function gg_run_mpt {
     cd ${SRC}
 
-    gg_wget models/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/raw/main/config.json
-    gg_wget models/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/raw/main/tokenizer.json
-    gg_wget models/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/raw/main/tokenizer_config.json
-    gg_wget models/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/raw/main/pytorch_model.bin.index.json
-    gg_wget models/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/raw/main/configuration_mpt.py
-    gg_wget models/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/resolve/main/pytorch_model-00001-of-00002.bin
-    gg_wget models/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/resolve/main/pytorch_model-00002-of-00002.bin
+    gg_wget models-mnt/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/raw/main/config.json
+    gg_wget models-mnt/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/raw/main/tokenizer.json
+    gg_wget models-mnt/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/raw/main/tokenizer_config.json
+    gg_wget models-mnt/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/raw/main/pytorch_model.bin.index.json
+    gg_wget models-mnt/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/raw/main/configuration_mpt.py
+    gg_wget models-mnt/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/resolve/main/pytorch_model-00001-of-00002.bin
+    gg_wget models-mnt/mpt/7B/ https://huggingface.co/mosaicml/mpt-7b/resolve/main/pytorch_model-00002-of-00002.bin
 
     cd build-ci-release
 
     set -e
 
-    path_models="../models/mpt/7B"
+    path_models="../models-mnt/mpt/7B"
 
     model_f16="${path_models}/ggml-model-f16.bin"
     model_q4_0="${path_models}/ggml-model-q4_0.bin"
@@ -180,10 +181,10 @@ function gg_sum_mpt {
 ## main
 
 if [ -z $GG_BUILD_LOW_PERF ]; then
-    rm -rf ${SRC}/models
+    rm -rf ${SRC}/models-mnt
 
-    mkdir -p $(realpath ${GG_GGML_MNT}/models)
-    ln -sfn ${GG_GGML_MNT}/models ${SRC}/models
+    mkdir -p $(realpath ${MNT}/models)
+    ln -sfn ${MNT}/models ${SRC}/models-mnt
 
     python3 -m pip install -r ${SRC}/requirements.txt
 fi