Honor -ngl option for Cuda offloading in llava (#3621)
author    M. Yusuf Sarıgöz <redacted>
Sat, 14 Oct 2023 10:52:44 +0000 (13:52 +0300)
committer GitHub <redacted>
Sat, 14 Oct 2023 10:52:44 +0000 (04:52 -0600)
examples/llava/llava.cpp

index 14dacc7807e8dcf992501c36dfe92d9ecf1e9b8a..8384d9d78ba3e0b29f5b148af510b1a0e346f5df 100644 (file)
@@ -79,7 +79,13 @@ int main(int argc, char ** argv) {
 
     llama_backend_init(params.numa);
 
-    llama_model_params model_params = llama_model_default_params();
+    llama_model_params model_params              = llama_model_default_params();
+                       model_params.n_gpu_layers = params.n_gpu_layers;
+                       model_params.main_gpu     = params.main_gpu;
+                       model_params.tensor_split = params.tensor_split;
+                       model_params.use_mmap     = params.use_mmap;
+                       model_params.use_mlock    = params.use_mlock;
+
     llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
     if (model == NULL) {
         fprintf(stderr , "%s: error: unable to load model\n" , __func__);
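With this change, the llava example copies the GPU offload settings parsed from the command line (n_gpu_layers, main_gpu, tensor_split, use_mmap, use_mlock) into llama_model_params before calling llama_load_model_from_file, so -ngl actually offloads layers when the binary is built with CUDA support. Previously model_params was left at its defaults, which keeps n_gpu_layers at 0 and runs the language model entirely on the CPU. A minimal usage sketch follows; the model, projector, and image paths are illustrative, not part of this commit:

    ./llava -m models/llava/ggml-model-q4_k.gguf \
            --mmproj models/llava/mmproj-model-f16.gguf \
            --image input.jpg -ngl 32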