ggml : add build-time message to remind about ggml_set_rows (#14661)
author     Georgi Gerganov <redacted>
           Sun, 13 Jul 2025 07:36:33 +0000 (10:36 +0300)
committer  GitHub <redacted>
           Sun, 13 Jul 2025 07:36:33 +0000 (10:36 +0300)
ggml-ci
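
Side note (illustrative, not part of the commit): the reminder mechanism is the standard #pragma message directive, which makes GCC, Clang and MSVC print the string as a compiler note every time the translation unit is built, without affecting code generation. A minimal stand-alone sketch, with a hypothetical file name:

    // build_note.cpp -- hypothetical sketch of the mechanism used in this commit.
    // Compiling this file prints the note below at build time; the binary is unchanged.
    #pragma message("TODO: implement the missing GGML_OP_SET_ROWS types (https://github.com/ggml-org/llama.cpp/pull/14661)")

    int main() {
        return 0;
    }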

ggml/src/ggml-cann/ggml-cann.cpp
ggml/src/ggml-cuda/ggml-cuda.cu
ggml/src/ggml-opencl/ggml-opencl.cpp
ggml/src/ggml-sycl/ggml-sycl.cpp

diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp
index ccb17eb072eb23ecf69766d4b4078f0110fa511a..e5e11d4cdced983e8bbe82ae377a3fd1ffc34361 100755 (executable)
@@ -2090,6 +2090,7 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev,
             {
                 // TODO: add support
                 // ref: https://github.com/ggml-org/llama.cpp/pull/14274
+#pragma message("TODO: implement F32, F16, BF16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, IQ4_NL support (https://github.com/ggml-org/llama.cpp/pull/14661)")
                 return false;
             } break;
         case GGML_OP_CPY: {
diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 88b17dd682c95845cc356c7e8de7370ce7457165..1478245998a3dd17793e4e9ec42c7b9e06e83bfa 100644 (file)
@@ -3222,6 +3222,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
             } break;
         case GGML_OP_SET_ROWS:
             {
+#pragma message("TODO: implement BF16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, IQ4_NL support (https://github.com/ggml-org/llama.cpp/pull/14661)")
                 return (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16) &&
                        op->src[0]->type == GGML_TYPE_F32 &&
                        op->src[1]->type == GGML_TYPE_I64;
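
For context (an illustrative sketch, not part of this commit): the condition above accepts SET_ROWS only when the destination type is F32 or F16, the source rows are F32 and the row indices are I64. Assuming the ggml_set_rows(ctx, dst, src, idx) API introduced in PR #14274, a graph node built as follows would match the combination the CUDA check currently accepts, while a quantized destination type would not until the TODO above is resolved; shapes and the exact signature here are assumptions:

    // set_rows_sketch.cpp -- hypothetical usage sketch; the ggml_set_rows
    // signature and shape constraints are assumed from PR #14274.
    #include "ggml.h"

    int main() {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        // destination: 16 rows of 64 elements, F16 (op->type == GGML_TYPE_F16)
        struct ggml_tensor * dst = ggml_new_tensor_2d(ctx, GGML_TYPE_F16, 64, 16);
        // source rows to write: 4 rows of 64 F32 elements (op->src[0]->type == GGML_TYPE_F32)
        struct ggml_tensor * src = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 4);
        // I64 indices selecting which rows of dst receive the source rows (op->src[1]->type == GGML_TYPE_I64)
        struct ggml_tensor * idx = ggml_new_tensor_1d(ctx, GGML_TYPE_I64, 4);

        // build the SET_ROWS node only; no backend computation is run here
        struct ggml_tensor * out = ggml_set_rows(ctx, dst, src, idx);
        (void) out;

        ggml_free(ctx);
        return 0;
    }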
diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp
index 58830b733a8af444463007d3cd5b4b58f157da32..3388259152b4617acf1727f27f18b3849a1377d0 100644 (file)
@@ -2280,6 +2280,7 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te
             {
                 // TODO: add support
                 // ref: https://github.com/ggml-org/llama.cpp/pull/14274
+#pragma message("TODO: implement BF16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, IQ4_NL support (https://github.com/ggml-org/llama.cpp/pull/14661)")
                 if (op->src[0]->type != GGML_TYPE_F32) {
                     return false;
                 }
diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp
index 65b26fd02766e94a7580a3b7f13d35b617e3045a..7f74fbfe5c14ceb539a5ab0894ee697b4fbc2445 100644 (file)
@@ -4303,6 +4303,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g
             {
                 // TODO: add support
                 // ref: https://github.com/ggml-org/llama.cpp/pull/14274
+#pragma message("TODO: implement BF16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, IQ4_NL support (https://github.com/ggml-org/llama.cpp/pull/14661)")
                 return (op->type == GGML_TYPE_F32 || (op->type == GGML_TYPE_F16 && op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_I64));
             } break;
         case GGML_OP_CPY: