git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama : remove check flash_attn with lora (#11104)
author: Xuan Son Nguyen <redacted>
Mon, 6 Jan 2025 12:41:12 +0000 (13:41 +0100)
committer: GitHub <redacted>
Mon, 6 Jan 2025 12:41:12 +0000 (13:41 +0100)
src/llama.cpp

index c162c31a67a40864b274b229ed278ef5af6d80a3..ebd6e3b2941c5132e9025bc1b1ac06fb3a1626ae 100644 (file)
@@ -11519,13 +11519,7 @@ int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
             float scale) {
-    if (ctx->cparams.flash_attn) {
-        LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
-        return -1;
-    }
-
     ctx->lora_adapters[adapter] = scale;
-
     return 0;
 }