From: Judd
Date: Fri, 26 Jul 2024 08:38:12 +0000 (+0800)
Subject: llama : fix order of parameters (#8706)
X-Git-Tag: upstream/0.0.4488~1020
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=01245f5b1629075543bc4478418c7d72a0b4b3c7;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : fix order of parameters (#8706)

The usage of `aclrtGetMemInfo` is correct; its documented parameter order is free, then total:
https://www.hiascend.com/doc_center/source/zh/canncommercial/63RC2/inferapplicationdev/aclcppdevg/aclcppdevg_03_0103.html

Co-authored-by: Judd
---

diff --git a/src/llama.cpp b/src/llama.cpp
index bc018374..77f7d32f 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2905,7 +2905,7 @@ static size_t llama_get_device_memory(const llama_model & model, int device) {
 #elif defined(GGML_USE_CANN)
     size_t total;
     size_t free;
-    ggml_backend_cann_get_device_memory(device, &total, &free);
+    ggml_backend_cann_get_device_memory(device, &free, &total);
     return free;
 #else
     return 1;
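
For readers following the fix, here is a minimal sketch of why the argument order matters. It assumes (this is not taken from the patch itself) that the CANN backend reports device memory in `(free, total)` order, mirroring the `aclrtGetMemInfo(attr, free, total)` order described in the linked Ascend documentation; the wrapper and function names below are illustrative, not the actual ggml implementation.

    #include <cstddef>

    #include <acl/acl.h>  // CANN runtime header (assumed include path), declares aclrtGetMemInfo

    // Illustrative stand-in for the backend call: reports memory as (free, total),
    // the same order as aclrtGetMemInfo. Error handling is omitted for brevity.
    static void cann_get_device_memory_sketch(int device, size_t * free, size_t * total) {
        aclrtSetDevice(device);                    // select the NPU to query
        aclrtGetMemInfo(ACL_HBM_MEM, free, total); // documented order: free bytes, then total bytes
    }

    // Caller side, mirroring llama_get_device_memory() after this patch.
    // Passing (&total, &free), as before the patch, would silently report
    // the device's total memory as its free memory.
    static size_t device_free_memory_sketch(int device) {
        size_t total;
        size_t free;
        cann_get_device_memory_sketch(device, &free, &total); // (free, total), not (total, free)
        return free;
    }

Because both parameters are `size_t *`, the compiler cannot catch a swapped pair; the only symptom is an inflated free-memory estimate, which is why the caller-side order has to match the backend's declaration.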