git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
[SYCL] replace llama-cli with llama-completion to remove the impact on the test scripts (#18290)
authorNeo Zhang <redacted>
Tue, 23 Dec 2025 04:59:12 +0000 (12:59 +0800)
committerGitHub <redacted>
Tue, 23 Dec 2025 04:59:12 +0000 (12:59 +0800)
* replace llama-cli with llama-completion to remove the impact on the test scripts

* Update examples/sycl/run-llama2.sh

Co-authored-by: Sigbjørn Skjæret <redacted>
* Update examples/sycl/run-llama2.sh

Co-authored-by: Sigbjørn Skjæret <redacted>
* Update examples/sycl/run-llama3.sh

Co-authored-by: Sigbjørn Skjæret <redacted>
* Update examples/sycl/run-llama3.sh

Co-authored-by: Sigbjørn Skjæret <redacted>
* Update examples/sycl/win-run-llama2.bat

Co-authored-by: Sigbjørn Skjæret <redacted>
* Update examples/sycl/win-run-llama3.bat

Co-authored-by: Sigbjørn Skjæret <redacted>
---------

Co-authored-by: Neo Zhang Jianyu <redacted>
Co-authored-by: Sigbjørn Skjæret <redacted>
examples/sycl/run-llama2.sh
examples/sycl/run-llama3.sh
examples/sycl/win-run-llama2.bat
examples/sycl/win-run-llama3.bat

index a018e4519724c0dd9991f189b7bbe9a7df6f240e..cf23619ee04b3cf23373e58a7017ba6844570e22 100755 (executable)
@@ -22,9 +22,9 @@ if [ $# -gt 0 ]; then
     GGML_SYCL_DEVICE=$1
     echo "use $GGML_SYCL_DEVICE as main GPU"
     #use signle GPU only
-    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT} -mg $GGML_SYCL_DEVICE -sm none
+    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-completion -m ${MODEL_FILE} -no-cnv -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT} -mg $GGML_SYCL_DEVICE -sm none
 
 else
     #use multiple GPUs with same max compute units
-    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT}
+    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-completion -m ${MODEL_FILE} -no-cnv -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT}
 fi
index 47702557031cd7a0c1374a72b036892544f428ec..feee5165e92929bb9ddd601f7288cc991bab7cf3 100755 (executable)
@@ -24,8 +24,8 @@ export UR_L0_ENABLE_RELAXED_ALLOCATION_LIMITS=1
 if [ $# -gt 0 ]; then
     GGML_SYCL_DEVICE=$1
     echo "Using $GGML_SYCL_DEVICE as the main GPU"
-    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT} -mg $GGML_SYCL_DEVICE -sm none
+    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-completion -m ${MODEL_FILE} -no-cnv -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT} -mg $GGML_SYCL_DEVICE -sm none
 else
     #use multiple GPUs with same max compute units
-    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m ${MODEL_FILE} -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT}
+    ZES_ENABLE_SYSMAN=1 ./build/bin/llama-completion -m ${MODEL_FILE} -no-cnv -p "${INPUT_PROMPT}" -n 400 -e -ngl ${NGL} -s 0 -c ${CONTEXT}
 fi
index b654f88f62c8d0f38ff49b6f6a399c37b653abe2..32ff673ae2664f7d17054ab860531f215bbd77f3 100644 (file)
@@ -8,4 +8,4 @@ set INPUT2="Building a website can be done in 10 simple steps:\nStep 1:"
 :: support malloc device memory more than 4GB.
 set UR_L0_ENABLE_RELAXED_ALLOCATION_LIMITS=1
 
-.\build\bin\llama-cli.exe -m models\llama-2-7b.Q4_0.gguf -p %INPUT2% -n 400 -e -ngl 99 -s 0
+.\build\bin\llama-completion.exe -m models\llama-2-7b.Q4_0.gguf -no-cnv -p %INPUT2% -n 400 -e -ngl 99 -s 0
index 608b834f60e47c9c362a147cacabe145b07263e1..ea4ae69d6c7eb5802a0b83b9ed0596c2e451d466 100644 (file)
@@ -8,4 +8,4 @@ set INPUT2="Building a website can be done in 10 simple steps:\nStep 1:"
 :: support malloc device memory more than 4GB.
 set UR_L0_ENABLE_RELAXED_ALLOCATION_LIMITS=1
 
-.\build\bin\llama-cli.exe -m models\Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf -p %INPUT2% -n 400 -s 0 -e -ngl 99
+.\build\bin\llama-completion.exe -m models\Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf -no-cnv -p %INPUT2% -n 400 -s 0 -e -ngl 99