git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
ci: server: tests python env on github container ubuntu latest / fix n_predict (…)
authorPierrick Hymbert <redacted>
Sat, 27 Apr 2024 15:50:48 +0000 (17:50 +0200)
committerGitHub <redacted>
Sat, 27 Apr 2024 15:50:48 +0000 (17:50 +0200)
* ci: server: fix python env

* ci: server: fix server tests after #6638

* ci: server: fix windows not building PR branch

.github/workflows/server.yml
examples/server/server.cpp

index a044d6044ce8691d81a9cd3ef250ece761e74c4e..79cd7d643d20eb1e7c706961e14f4f3cac20b22d 100644 (file)
@@ -41,24 +41,16 @@ jobs:
             sanitizer: ""
       fail-fast: false # While -DLLAMA_SANITIZE_THREAD=ON is broken
 
-    container:
-      image: ubuntu:latest
-      ports:
-        - 8888
-      options: --cpus 4
-
     steps:
       - name: Dependencies
         id: depends
         run: |
-          apt-get update
-          apt-get -y install \
+          sudo apt-get update
+          sudo apt-get -y install \
             build-essential \
             xxd \
             git \
             cmake \
-            python3-pip \
-            python3-venv \
             curl \
             wget \
             language-pack-en \
@@ -71,6 +63,17 @@ jobs:
           fetch-depth: 0
           ref: ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha || github.head_ref || github.ref_name }}
 
+      - name: Python setup
+        id: setup_python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.11'
+
+      - name: Tests dependencies
+        id: test_dependencies
+        run: |
+          pip install -r examples/server/tests/requirements.txt
+
       - name: Verify server deps
         id: verify_server_deps
         run: |
@@ -101,13 +104,6 @@ jobs:
               -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON ;
           cmake --build . --config ${{ matrix.build_type }} -j $(nproc) --target server
 
-      - name: Setup python env
-        id: pipenv
-        run: |
-          cd examples/server/tests
-          python3 -m venv venv
-          . venv/bin/activate
-          pip install -r requirements.txt
 
       - name: Tests
         id: server_integration_tests
@@ -133,6 +129,7 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
+          ref: ${{ github.event.inputs.sha || github.event.pull_request.head.sha || github.sha || github.head_ref || github.ref_name }}
 
       - name: libCURL
         id: get_libcurl
index 6f8ba3fc65d9f0c4c5660bd6a3b4caeee6fd1c81..2760aea8fd3e9bbb5c4c0d8d3449d6340453f6dd 100644 (file)
@@ -1208,7 +1208,7 @@ struct server_context {
         }
 
         auto n_ctx_train = llama_n_ctx_train(model);
-        if (slot.params.n_predict < 1 && slot.ga_n == 1
+        if (slot.params.n_predict < 1 && slot.n_predict < 1 && slot.ga_n == 1
                     && slot.n_prompt_tokens + slot.n_decoded >= n_ctx_train) {
             LOG_WARNING("n_predict is not set and self-context extend is disabled."
                         " Limiting generated tokens to n_ctx_train to avoid EOS-less generation infinite loop", {