Removes multiple newlines at the end of files that are breaking the editorconfig step...
author Clint Herron <redacted>
Tue, 2 Jul 2024 16:18:10 +0000 (12:18 -0400)
committer GitHub <redacted>
Tue, 2 Jul 2024 16:18:10 +0000 (12:18 -0400)
22 files changed:
.github/ISSUE_TEMPLATE/config.yml
common/common.h
examples/embedding/README.md
examples/infill/infill.cpp
examples/lookup/README.md
examples/main-cmake-pkg/.gitignore
examples/main-cmake-pkg/CMakeLists.txt
examples/server-embd.py
examples/server/tests/features/passkey.feature
examples/server/themes/buttons-top/index.html
examples/server/themes/wild/index.html
examples/sycl/run-llama2.sh
examples/sycl/win-build-sycl.bat
examples/sycl/win-run-llama2.bat
ggml/include/ggml-metal.h
ggml/src/ggml-cuda/cpy.cu
ggml/src/ggml-metal.metal
ggml/src/ggml-quants.h
ggml/src/ggml-vulkan-shaders.hpp
scripts/pod-llama.sh
src/unicode-data.cpp
tests/test-rope.cpp
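
The commit itself contains no tooling, only the file edits below; each hunk deletes the redundant trailing blank line(s) so that every file ends with exactly one newline, which is what the editorconfig check expects. As an illustration only (the script name, CRLF handling, and command line are assumptions, not part of this commit), here is a minimal Python sketch that applies the same normalization:

```python
#!/usr/bin/env python3
# normalize_eof.py -- hypothetical helper, not part of this commit.
# Rewrites each file so it ends with exactly one newline, the same
# normalization the hunks below perform by hand.
import sys
from pathlib import Path

def strip_trailing_blank_lines(path: Path) -> bool:
    """Return True if the file was modified."""
    data = path.read_bytes()
    if not data:
        return False  # leave empty files untouched
    # Drop every trailing CR/LF byte, then re-append a single LF.
    # Note: this sketch does not preserve a CRLF final line ending.
    normalized = data.rstrip(b"\r\n") + b"\n"
    if normalized == data:
        return False
    path.write_bytes(normalized)
    return True

if __name__ == "__main__":
    for name in sys.argv[1:]:
        if strip_trailing_blank_lines(Path(name)):
            print(f"fixed: {name}")
```

Run against the working tree with, for example, `python normalize_eof.py $(git ls-files)` before committing.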

diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index c88134dbb644a598526511979938d5c2db144c03..eb8c4b472df4c4fb22e7190a320dbf800eea4807 100644 (file)
@@ -9,5 +9,3 @@ contact_links:
   - name: Want to contribute?
     url: https://github.com/ggerganov/llama.cpp/wiki/contribute
     about: Head to the contribution guide page of the wiki for areas you can help with
-
-
diff --git a/common/common.h b/common/common.h
index 627b7ed854757eb33252895c13152ed6c563acb6..65c0ef81adf7cac183384781d402de56a9fab19a 100644 (file)
@@ -459,4 +459,3 @@ void yaml_dump_string_multiline(FILE * stream, const char * prop_name, const cha
 void yaml_dump_non_result_info(
     FILE * stream, const gpt_params & params, const llama_context * lctx,
     const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
-
diff --git a/examples/embedding/README.md b/examples/embedding/README.md
index 86df1895878465c11482effa9bed005ea5528f10..e3705b4547677250cfd0bb58a0664175f286842d 100644 (file)
@@ -58,4 +58,3 @@ The above command will output space-separated float values.
 ```powershell
 embedding.exe -p 'Castle<#sep#>Stronghold<#sep#>Dog<#sep#>Cat' --embd-separator '<#sep#>' --embd-normalize 2  --embd-output-format '' -m './path/to/model.gguf' --n-gpu-layers 99 --log-disable 2>/dev/null
 ```
-
diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp
index ca71dd687f30e00f9895e3f5de8c741647a70d53..0e682154d5f6be7a3eafde5b0cc33f1f3f62cfe0 100644 (file)
@@ -659,4 +659,3 @@ int main(int argc, char ** argv) {
 
     return 0;
 }
-
diff --git a/examples/lookup/README.md b/examples/lookup/README.md
index 5bfb0de9360414aa92c9375ad3352099ecd87e6f..71c345c037a2fbfb7ee00640f7844f1d53143152 100644 (file)
@@ -10,4 +10,3 @@ More info:
 
 https://github.com/ggerganov/llama.cpp/pull/4484
 https://github.com/ggerganov/llama.cpp/issues/4226
-
diff --git a/examples/main-cmake-pkg/.gitignore b/examples/main-cmake-pkg/.gitignore
index e32c11c7f4653cd6b1db1af13873d1ea50095544..67c01d64cb7ab21f9e6b59c018dd849f7f9aa5db 100644 (file)
@@ -48,4 +48,3 @@
 build*/
 out/
 tmp/
-
diff --git a/examples/main-cmake-pkg/CMakeLists.txt b/examples/main-cmake-pkg/CMakeLists.txt
index a97ded3653f0cb64b6af5a8cda715f6fdaea0749..3b38db292320f813b011d6b41f94179f00979556 100644 (file)
@@ -30,4 +30,3 @@ target_include_directories(${TARGET} PRIVATE ${_common_path})
 install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
-
diff --git a/examples/server-embd.py b/examples/server-embd.py
index 118e042716c02065c7daf83bd537652ead5ba14d..a9a36a44ccac5d7303aa1e0b0aeefa324b659a25 100644 (file)
@@ -31,4 +31,3 @@ for i in range(n-1):
         embedding2 = np.array(result[j])
         similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
         print(f"Similarity between {i} and {j}: {similarity:.2f}")
-
diff --git a/examples/server/tests/features/passkey.feature b/examples/server/tests/features/passkey.feature
index 1bde7aab8bab0db67e0f81ef766c0cdc8cb3069e..6a5a84e6a194177ea4b5ad17bfe803cb0ab02f39 100644 (file)
@@ -52,4 +52,3 @@ Feature: Passkey / Self-extend with context shift
       #| TheBloke/Llama-2-7B-GGUF        | llama-2-7b.Q2_K.gguf        | 4096        | 3   | 16384 | 512     | 4    | 512    | 500    | 300   | 1234    | 5           | 1234           |
       #| TheBloke/Mixtral-8x7B-v0.1-GGUF | mixtral-8x7b-v0.1.Q2_K.gguf | 32768       | 2   | 16384 | 512     | 4    | 512    | 500    | 100   | 0987    | 5           | 0
       # 987           |
-
diff --git a/examples/server/themes/buttons-top/index.html b/examples/server/themes/buttons-top/index.html
index 6af30d307a4b5b5ff06111bf77572c5d721a11df..8334bcde5049cce2889f47c99a34e68beaa43e81 100644 (file)
 </body>
 
 </html>
-
diff --git a/examples/server/themes/wild/index.html b/examples/server/themes/wild/index.html
index 772e716cdb2e074edfd5472bcee15ba984c20a42..8361c577494d72338ddc5d3e5ff356788ebe12fc 100644 (file)
 </body>
 
 </html>
-
diff --git a/examples/sycl/run-llama2.sh b/examples/sycl/run-llama2.sh
index da0e4aaba688c88d6d5691a02e644b7ff8669a3a..111366fb036a51da6af6cdc63953c8abbe03e6af 100755 (executable)
@@ -34,4 +34,3 @@ fi
 
 #use multiple GPUs with same max compute units
 #ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -p "${INPUT2}" -n 400 -e -ngl 33 -s 0
-
diff --git a/examples/sycl/win-build-sycl.bat b/examples/sycl/win-build-sycl.bat
index cdae5a52855a21a01e3445043ec12a4d605d7c00..17dd1ff5c169ec033e64b4b37aa70e624ed9f73c 100644 (file)
@@ -31,4 +31,3 @@ exit /B 0
 :ERROR
 echo comomand error: %errorlevel%
 exit /B %errorlevel%
-
diff --git a/examples/sycl/win-run-llama2.bat b/examples/sycl/win-run-llama2.bat
index 1d4d7d2cdcb6fa03862c00fda409c7fd6e7d7fe7..f0385cdf0783e69d122dcc7610560ee947e15b82 100644 (file)
@@ -7,5 +7,3 @@ set INPUT2="Building a website can be done in 10 simple steps:\nStep 1:"
 
 
 .\build\bin\main.exe -m models\llama-2-7b.Q4_0.gguf -p %INPUT2% -n 400 -e -ngl 33 -s 0
-
-
diff --git a/ggml/include/ggml-metal.h b/ggml/include/ggml-metal.h
index e7543ae795d284dccef64edd663095cab2782e4a..6c3226c37e0ef48072ddf146d73a392cf1d9fab1 100644 (file)
@@ -63,4 +63,3 @@ GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);
 #ifdef __cplusplus
 }
 #endif
-
diff --git a/ggml/src/ggml-cuda/cpy.cu b/ggml/src/ggml-cuda/cpy.cu
index 12d741f017d3b4bdebae6d5b719d30c737ac2aea..3db57034b488d989770e3b62ad0aecbf739dea59 100644 (file)
@@ -487,4 +487,3 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) {
         GGML_ASSERT(false);
     }
 }
-
diff --git a/ggml/src/ggml-metal.metal b/ggml/src/ggml-metal.metal
index e2796fd601281166213e81575fe56b54c97f2c72..c3503479b35bac3c1ad919b3b13d4e56f1df7176 100644 (file)
@@ -6537,4 +6537,3 @@ template [[host_name("kernel_mul_mv_id_iq3_s_f32")]]   kernel kernel_mul_mv_id_t
 template [[host_name("kernel_mul_mv_id_iq2_s_f32")]]   kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_s_f32_impl>>;
 template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]]  kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_nl_f32_impl>>;
 template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]]  kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_xs_f32_impl>>;
-
diff --git a/ggml/src/ggml-quants.h b/ggml/src/ggml-quants.h
index 4d436a8f06b3e5081205dd568eb12d17443d4ee0..30983b8728fa24c3d655bf6a75ad4075525b2061 100644 (file)
@@ -130,4 +130,3 @@ void iq3xs_free_impl(int grid_size);
 #ifdef __cplusplus
 }
 #endif
-
diff --git a/ggml/src/ggml-vulkan-shaders.hpp b/ggml/src/ggml-vulkan-shaders.hpp
index 01ff66f71fcf047bd63974b8ff33a4bb9756b5ae..f0c4c6baf592b2ab97df26a27dfddb2e4b487a50 100644 (file)
@@ -144954,4 +144954,3 @@ unsigned char sum_rows_f32_data[] = {
 
 };
 const uint64_t sum_rows_f32_len = 2112;
-
diff --git a/scripts/pod-llama.sh b/scripts/pod-llama.sh
index 586d6ea18af01ef6bafbe10fd5f8d13705fece79..0d6d4032d8a9e5f6b21c7efe6aecf46d99bcdb86 100644 (file)
@@ -210,4 +210,3 @@ fi
 # more benches
 #GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-7b/ggml-model-q4_k.gguf  4096 1 99 1 512,3200 128,128,800 1
 #GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-13b/ggml-model-q4_k.gguf 4096 1 99 1 512,3200 128,128,800 1
-
diff --git a/src/unicode-data.cpp b/src/unicode-data.cpp
index 4a939898b367f5a24db8286cc8f46272b2d9ceca..02bdf782380fe7f9928e979ac549f5221ea70b51 100644 (file)
@@ -7030,4 +7030,3 @@ const std::vector<range_nfd> unicode_ranges_nfd = {  // start, last, nfd
 {0x02FA1C, 0x02FA1C, 0x009F3B},
 {0x02FA1D, 0x02FA1D, 0x02A600},
 };
-
diff --git a/tests/test-rope.cpp b/tests/test-rope.cpp
index f0895ffaad6a10fb6c92b9de408fc80c3e14ba36..8159e276af617bb8f6b584fdef4fbd701f035726 100644 (file)
@@ -218,4 +218,3 @@ int main(int /*argc*/, const char ** /*argv*/) {
 
     return 0;
 }
-