llama : make tensor_split ptr instead of array (#2272)
author Georgi Gerganov <redacted>
Fri, 21 Jul 2023 10:10:51 +0000 (13:10 +0300)
committer GitHub <redacted>
Fri, 21 Jul 2023 10:10:51 +0000 (13:10 +0300)
examples/common.cpp
ggml-cuda.cu
llama.cpp
llama.h

diff --git a/examples/common.cpp b/examples/common.cpp
index fd6dbc0e3e6415308959a957a247561c700b8ef7..476d565949debe155b0175750eea151871d6d0f1 100644 (file)
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -586,7 +586,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     lparams.n_batch      = params.n_batch;
     lparams.n_gpu_layers = params.n_gpu_layers;
     lparams.main_gpu     = params.main_gpu;
-    memcpy(lparams.tensor_split, params.tensor_split, LLAMA_MAX_DEVICES*sizeof(float));
+    lparams.tensor_split = params.tensor_split;
     lparams.low_vram     = params.low_vram;
     lparams.seed         = params.seed;
     lparams.f16_kv       = params.memory_f16;
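
The memcpy is no longer needed because lparams.tensor_split is now just a borrowed pointer. Below is a minimal sketch of why the plain assignment works, assuming gpt_params keeps tensor_split as a caller-owned fixed-size array; the struct names are stand-ins for illustration, not the real types:

#include <cstddef>

static const size_t kMaxDevices = 4;           // stand-in for LLAMA_MAX_DEVICES

struct gpt_params_like {                       // hypothetical stand-in for gpt_params
    float tensor_split[kMaxDevices] = {0};     // storage owned by the caller-side struct
};

struct llama_context_params_like {             // hypothetical stand-in for llama_context_params
    const float * tensor_split = nullptr;      // after this commit: a borrowed pointer
};

int main() {
    gpt_params_like           params;
    llama_context_params_like lparams;

    // the array decays to a pointer to params.tensor_split[0];
    // no copy is made, so params must outlive lparams
    lparams.tensor_split = params.tensor_split;
    return 0;
}
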
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index d3054a7fac71d344918303480240bcd24d4d2b9b..6537897b990f458b1928e3df3d3425276136051f 100644 (file)
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -2512,6 +2512,9 @@ void ggml_init_cublas() {
 }
 
 void ggml_cuda_set_tensor_split(const float * tensor_split) {
+    if (tensor_split == nullptr) {
+        return;
+    }
     bool all_zero = true;
     for (int i = 0; i < g_device_count; ++i) {
         if (tensor_split[i] != 0.0f) {
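
A standalone sketch of what the new guard buys callers (it mirrors the early return above but is not the actual CUDA backend; names and the device count are illustrative): passing a null split is now a no-op, so whatever split was configured before stays in effect.

#include <cstdio>

static const int kDeviceCount = 2;                    // illustrative device count
static float g_split[kDeviceCount] = {0.5f, 0.5f};    // pretend current split state

static void set_tensor_split(const float * tensor_split) {
    if (tensor_split == nullptr) {
        return;                                       // no split supplied: keep the current one
    }
    for (int i = 0; i < kDeviceCount; ++i) {
        g_split[i] = tensor_split[i];
    }
}

int main() {
    set_tensor_split(nullptr);                        // no-op, existing values survive
    const float custom[kDeviceCount] = {0.75f, 0.25f};
    set_tensor_split(custom);                         // explicit split overwrites them
    printf("%.2f %.2f\n", g_split[0], g_split[1]);    // prints: 0.75 0.25
    return 0;
}
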
diff --git a/llama.cpp b/llama.cpp
index 796dfdacbc34e8351a1fde5d7024c3c18ab428c9..23e746d625fe44be9651d0841988b2f5d896dc11 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -849,7 +849,7 @@ struct llama_context_params llama_context_default_params() {
         /*.n_batch                     =*/ 512,
         /*.gpu_layers                  =*/ 0,
         /*.main_gpu                    =*/ 0,
-        /*.tensor_split                =*/ {0},
+        /*.tensor_split                =*/ nullptr,
         /*.rope_freq_base              =*/ 10000.0f,
         /*.rope_freq_scale             =*/ 1.0f,
         /*.progress_callback           =*/ nullptr,
@@ -1289,7 +1289,7 @@ static bool llama_model_load(
         int n_batch,
         int n_gpu_layers,
         int main_gpu,
-        float * tensor_split,
+        const float * tensor_split,
         float rope_freq_base,
         float rope_freq_scale,
         bool low_vram,
diff --git a/llama.h b/llama.h
index b676a383b31aa4f700adabf5b47c88d786d0d963..c565f6a001cf3320d0b8fdafa429c6eff888171d 100644 (file)
--- a/llama.h
+++ b/llama.h
@@ -88,7 +88,8 @@ extern "C" {
         int32_t  n_batch;                      // prompt processing batch size
         int32_t  n_gpu_layers;                 // number of layers to store in VRAM
         int32_t  main_gpu;                     // the GPU that is used for scratch and small tensors
-        float tensor_split[LLAMA_MAX_DEVICES]; // how to split layers across multiple GPUs
+
+        const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES)
 
         // ref: https://github.com/ggerganov/llama.cpp/pull/2054
         float    rope_freq_base;  // RoPE base frequency
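
For API consumers, a minimal caller-side sketch of the new contract, assuming only what the header above declares: tensor_split is a borrowed pointer to LLAMA_MAX_DEVICES floats, or nullptr to accept the backend's default split. The values shown (the GPU layer count, everything on device 0) are illustrative, not recommendations:

#include "llama.h"

int main() {
    // storage stays owned by the caller; llama.cpp only reads through the pointer
    static float split[LLAMA_MAX_DEVICES] = {0};
    split[0] = 1.0f;                                  // put everything on the first device

    struct llama_context_params params = llama_context_default_params();
    params.n_gpu_layers = 32;                         // illustrative value
    params.tensor_split = split;                      // or leave it nullptr for the default split

    // params is then passed to the usual model/context creation calls as before
    return 0;
}
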