From: Xuan Son Nguyen
Date: Mon, 6 Jan 2025 12:41:12 +0000 (+0100)
Subject: llama : remove check flash_attn with lora (#11104)
X-Git-Tag: upstream/0.0.4488~61
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=09186fabbe05236f2b9446ba6c643cb737540d10;p=pkg%2Fggml%2Fsources%2Fllama.cpp

llama : remove check flash_attn with lora (#11104)
---

diff --git a/src/llama.cpp b/src/llama.cpp
index c162c31a..ebd6e3b2 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -11519,13 +11519,7 @@ int32_t llama_lora_adapter_set(
         struct llama_context * ctx,
         struct llama_lora_adapter * adapter,
         float scale) {
-    if (ctx->cparams.flash_attn) {
-        LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
-        return -1;
-    }
-
     ctx->lora_adapters[adapter] = scale;
-
     return 0;
 }
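
The check removed above made llama_lora_adapter_set return -1 whenever the context had been created with flash_attn enabled. What follows is a minimal usage sketch, not part of the commit, showing the combination the change now permits; it assumes the pre-rename C API names of this era (llama_load_model_from_file, llama_new_context_with_model, llama_lora_adapter_init), and the model/adapter file paths are placeholders.

// Sketch only: apply a LoRA adapter to a context created with flash_attn enabled.
// API names assumed from the llama.cpp C API at the time of this commit; paths are placeholders.
#include "llama.h"
#include <stdio.h>

int main(void) {
    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (!model) {
        return 1;
    }

    struct llama_context_params cparams = llama_context_default_params();
    cparams.flash_attn = true;  // before this commit, this made llama_lora_adapter_set fail with -1

    struct llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (!ctx) {
        llama_free_model(model);
        return 1;
    }

    struct llama_lora_adapter * adapter = llama_lora_adapter_init(model, "adapter.gguf");
    if (adapter && llama_lora_adapter_set(ctx, adapter, 1.0f) == 0) {
        printf("LoRA adapter applied with flash_attn enabled\n");
    }

    llama_free(ctx);
    llama_free_model(model);
    return 0;
}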