git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
Correct free memory and total memory. (#6630)
author: MasterYi1024 <redacted>
Fri, 12 Apr 2024 08:28:12 +0000 (16:28 +0800)
committer: GitHub <redacted>
Fri, 12 Apr 2024 08:28:12 +0000 (10:28 +0200)
Co-authored-by: MasterYi <redacted>
llama.cpp

index 73ff60706288429d1e510321bc101c13e8bb6f16..dad2c4fbffb164c37f338b97f821486e8fd35d53 100644 (file)
--- a/llama.cpp
+++ b/llama.cpp
@@ -1638,17 +1638,17 @@ static size_t llama_get_device_memory(int device) {
 #if defined(GGML_USE_CUDA)
     size_t total;
     size_t free;
-    ggml_backend_cuda_get_device_memory(device, &total, &free);
+    ggml_backend_cuda_get_device_memory(device, &free, &total);
     return free;
 #elif defined(GGML_USE_SYCL)
     size_t total;
     size_t free;
-    ggml_backend_sycl_get_device_memory(device, &total, &free);
+    ggml_backend_sycl_get_device_memory(device, &free, &total);
     return free;
 #elif defined(GGML_USE_VULKAN)
     size_t total;
     size_t free;
-    ggml_backend_vk_get_device_memory(device, &total, &free);
+    ggml_backend_vk_get_device_memory(device, &free, &total);
     return free;
 #else
     return 1;