From: Georgi Gerganov Date: Thu, 23 Mar 2023 20:39:44 +0000 (+0200) Subject: Remove obsolete command from Docker script X-Git-Tag: gguf-v0.4.0~1136 X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=4cc053b6d5e9df7ac21fa06b7208a70c156d4d7a;p=pkg%2Fggml%2Fsources%2Fllama.cpp Remove obsolete command from Docker script --- diff --git a/.devops/tools.sh b/.devops/tools.sh index 352e0494..b0196b60 100755 --- a/.devops/tools.sh +++ b/.devops/tools.sh @@ -16,11 +16,7 @@ elif [[ $arg1 == '--quantize' || $arg1 == '-q' ]]; then ./quantize $arg2 elif [[ $arg1 == '--run' || $arg1 == '-r' ]]; then ./main $arg2 -elif [[ $arg1 == '--download' || $arg1 == '-d' ]]; then - python3 ./download-pth.py $arg2 elif [[ $arg1 == '--all-in-one' || $arg1 == '-a' ]]; then - echo "Downloading model..." - python3 ./download-pth.py "$1" "$2" echo "Converting PTH to GGML..." for i in `ls $1/$2/ggml-model-f16.bin*`; do if [ -f "${i/f16/q4_0}" ]; then @@ -39,8 +35,6 @@ else echo " ex: \"/models/7B/\" 1" echo " --quantize (-q): Optimize with quantization process ggml" echo " ex: \"/models/7B/ggml-model-f16.bin\" \"/models/7B/ggml-model-q4_0.bin\" 2" - echo " --download (-d): Download original llama model from CDN: https://agi.gpt4.org/llama/" - echo " ex: \"/models/\" 7B" - echo " --all-in-one (-a): Execute --download, --convert & --quantize" + echo " --all-in-one (-a): Execute --convert & --quantize" echo " ex: \"/models/\" 7B" fi