From: Georgi Gerganov
Date: Tue, 6 Jun 2023 19:54:39 +0000 (+0300)
Subject: llama : fix vram_scratch var
X-Git-Tag: gguf-v0.4.0~675
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=2d7bf110edd8c49209401a16132052cba706ffd0;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : fix vram_scratch var
---

diff --git a/llama.cpp b/llama.cpp
index cf512ccd..16d6f6ef 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1076,6 +1076,7 @@ static void llama_model_load_internal(
     // prepare memory for the weights
     size_t vram_weights = 0;
+    size_t vram_scratch = 0;

     {
         const uint32_t n_embd  = hparams.n_embd;
         const uint32_t n_layer = hparams.n_layer;
@@ -1152,8 +1153,9 @@ static void llama_model_load_internal(
         fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
                 mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);

+        (void) vram_scratch;
 #ifdef GGML_USE_CUBLAS
-        const size_t vram_scratch = n_batch * MB;
+        vram_scratch = n_batch * MB;
         ggml_cuda_set_scratch_size(vram_scratch);
         if (n_gpu_layers > 0) {
             fprintf(stderr, "%s: allocating batch_size x 1 MB = %ld MB VRAM for the scratch buffer\n",
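
The pattern applied by the patch, shown below as a minimal standalone C++ sketch (the function, values, and MB constant here are illustrative placeholders, not the actual llama.cpp code): the scratch-size variable is declared unconditionally before the #ifdef block, and a (void) cast references it once so that builds without GGML_USE_CUBLAS do not trigger an unused-variable warning.

    #include <cstdio>
    #include <cstddef>

    static const size_t MB = 1024 * 1024; // illustrative constant

    static void load_model(int n_batch) {
        // Declared unconditionally so the variable exists whether or not
        // the CUDA path is compiled in.
        size_t vram_scratch = 0;

        // Reference the variable once so -Wunused-variable does not fire
        // when GGML_USE_CUBLAS is not defined.
        (void) vram_scratch;
    #ifdef GGML_USE_CUBLAS
        vram_scratch = n_batch * MB;
        fprintf(stderr, "allocating %zu MB VRAM for the scratch buffer\n",
                vram_scratch / MB);
    #endif
        (void) n_batch;
    }

    int main() {
        load_model(512);
        return 0;
    }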