From: slaren
Date: Thu, 21 Mar 2024 12:59:53 +0000 (+0100)
Subject: cuda : fix LLAMA_CUDA_F16 build (llama/6197)
X-Git-Tag: upstream/0.0.1642~822
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=1f86de6d657c01e61b6b9fb6bc71ef8f6bcc048b;p=pkg%2Fggml%2Fsources%2Fggml

cuda : fix LLAMA_CUDA_F16 build (llama/6197)
---

diff --git a/src/ggml-cuda.cu b/src/ggml-cuda.cu
index 280839ea..04c6f5d0 100644
--- a/src/ggml-cuda.cu
+++ b/src/ggml-cuda.cu
@@ -9453,7 +9453,7 @@ static void ggml_cuda_op_dequantize_mul_mat_vec(
     // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
 #ifdef GGML_CUDA_F16
-    cuda_pool_alloc<half> src1_dfloat_a;
+    ggml_cuda_pool_alloc<half> src1_dfloat_a(ctx.pool());
     half * src1_dfloat = nullptr; // dfloat == half
     bool src1_convert_f16 =
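
Note (not part of the commit): the fix binds the RAII pool allocator to the backend context's pool at construction time, which is why the new line passes ctx.pool(). The sketch below illustrates that pattern in simplified form; the names example_pool and example_pool_alloc, and the pool interface shown, are assumptions for illustration and are not the actual ggml implementation.

    #include <cstddef>

    struct example_pool {
        // hypothetical pool interface: hands out buffers and takes them back
        // (stand-ins for device allocations in the real backend)
        void * alloc(size_t size)          { return ::operator new(size); }
        void   free(void * ptr, size_t)    { ::operator delete(ptr); }
    };

    template <typename T>
    struct example_pool_alloc {
        example_pool & pool;
        T *            ptr  = nullptr;
        size_t         size = 0;

        // the pool must be supplied up front, mirroring
        // ggml_cuda_pool_alloc<half> src1_dfloat_a(ctx.pool());
        explicit example_pool_alloc(example_pool & pool) : pool(pool) {}

        // allocate n elements from the pool and remember the size
        T * alloc(size_t n) {
            size = n * sizeof(T);
            ptr  = static_cast<T *>(pool.alloc(size));
            return ptr;
        }

        // return the buffer to the pool automatically at scope exit
        ~example_pool_alloc() {
            if (ptr != nullptr) {
                pool.free(ptr, size);
            }
        }
    };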