git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
metal : support for Swift (#3078)
author kchro3 <redacted>
Sat, 9 Sep 2023 09:12:10 +0000 (02:12 -0700)
committer GitHub <redacted>
Sat, 9 Sep 2023 09:12:10 +0000 (17:12 +0800)
* Metal support for Swift

* update

* add a toggle for arm/arm64

* set minimum versions for all platforms

* update to use newLibraryWithURL

* bump version

Co-authored-by: Jhen-Jie Hong <redacted>
---------

Co-authored-by: Jhen-Jie Hong <redacted>
Package.swift
ggml-metal.m

index 96f52c4f0caadbfa0bd8dcd1a68822cf64765d84..fb95ef7ebc59f05ffa63c48e74eefd36f098fe65 100644 (file)
@@ -2,8 +2,30 @@
 
 import PackageDescription
 
+#if arch(arm) || arch(arm64)
+let platforms: [SupportedPlatform]? = [
+    .macOS(.v11),
+    .iOS(.v14),
+    .watchOS(.v4),
+    .tvOS(.v14)
+]
+let exclude: [String] = []
+let additionalSources: [String] = ["ggml-metal.m"]
+let additionalSettings: [CSetting] = [
+    .unsafeFlags(["-fno-objc-arc"]),
+    .define("GGML_SWIFT"),
+    .define("GGML_USE_METAL")
+]
+#else
+let platforms: [SupportedPlatform]? = nil
+let exclude: [String] = ["ggml-metal.metal"]
+let additionalSources: [String] = []
+let additionalSettings: [CSetting] = []
+#endif
+
 let package = Package(
     name: "llama",
+    platforms: platforms,
     products: [
         .library(name: "llama", targets: ["llama"]),
     ],
@@ -11,23 +33,23 @@ let package = Package(
         .target(
             name: "llama",
             path: ".",
-            exclude: ["ggml-metal.metal"],
+            exclude: exclude,
             sources: [
                 "ggml.c",
                 "llama.cpp",
                 "ggml-alloc.c",
-                "k_quants.c"
-            ],
+                "k_quants.c",
+            ] + additionalSources,
             publicHeadersPath: "spm-headers",
             cSettings: [
                 .unsafeFlags(["-Wno-shorten-64-to-32"]),
                 .define("GGML_USE_K_QUANTS"),
                 .define("GGML_USE_ACCELERATE")
-            ],
+            ] + additionalSettings,
             linkerSettings: [
                 .linkedFramework("Accelerate")
             ]
-        ),
+        )
     ],
     cxxLanguageStandard: .cxx11
 )
index 1e6845b979e4528e579cb5740094f46af7fab39f..b577d7f6088a425155f2df3f76ea8668e7d2c8bb 100644 (file)
@@ -144,12 +144,20 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
 
     ctx->d_queue = dispatch_queue_create("llama.cpp", DISPATCH_QUEUE_CONCURRENT);
 
-#if 0
-    // compile from source string and show compile log
+#ifdef GGML_SWIFT
+    // load the default.metallib file
     {
         NSError * error = nil;
 
-        ctx->library = [ctx->device newLibraryWithSource:msl_library_source options:nil error:&error];
+        NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
+        NSString * llamaBundlePath = [bundle pathForResource:@"llama_llama" ofType:@"bundle"];
+        NSBundle * llamaBundle = [NSBundle bundleWithPath:llamaBundlePath];
+        NSString * libPath = [llamaBundle pathForResource:@"default" ofType:@"metallib"];
+        NSURL * libURL = [NSURL fileURLWithPath:libPath];
+
+        // Load the metallib file into a Metal library
+        ctx->library = [ctx->device newLibraryWithURL:libURL error:&error];
+
         if (error) {
             metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]);
             return NULL;