git.djapps.eu Git - pkg/ggml/sources/whisper.cpp/commitdiff
talk-llama : fix n_gpu_layers usage again (#1442)
author Jhen-Jie Hong <redacted>
Tue, 7 Nov 2023 08:51:27 +0000 (16:51 +0800)
committer GitHub <redacted>
Tue, 7 Nov 2023 08:51:27 +0000 (10:51 +0200)
examples/talk-llama/talk-llama.cpp

index bc0119a7f894c1f6744617a4284f0c5da8e6d320..8c41ef542dc8468a1e94f195c5ff7d1985802186 100644
@@ -267,7 +267,7 @@ int main(int argc, char ** argv) {
 
     auto lmparams = llama_model_default_params();
     if (!params.use_gpu) {
-        lcparams.lmparams = 0;
+        lmparams.n_gpu_layers = 0;
     }
 
     struct llama_model * model_llama = llama_load_model_from_file(params.model_llama.c_str(), lmparams);
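The fix assigns `n_gpu_layers = 0` on `lmparams` (the `llama_model_params` struct actually passed to `llama_load_model_from_file`), replacing the earlier mistyped `lcparams.lmparams = 0;`. Below is a minimal sketch of the intended pattern, assuming llama.cpp's C API as used by talk-llama around this commit; `load_llama_model`, `model_path`, and `use_gpu` are illustrative names, not part of the original code.

```cpp
// Sketch only: shows how disabling GPU usage maps to zero offloaded layers.
#include <string>

#include "llama.h"

static struct llama_model * load_llama_model(const std::string & model_path, bool use_gpu) {
    auto lmparams = llama_model_default_params();

    // When the user disables GPU usage, offload zero layers so the model
    // stays entirely on the CPU.
    if (!use_gpu) {
        lmparams.n_gpu_layers = 0;
    }

    return llama_load_model_from_file(model_path.c_str(), lmparams);
}
```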