llama : use cmake for swift build (#10525)
author Diego Devesa <redacted>
Sun, 8 Dec 2024 11:14:54 +0000 (12:14 +0100)
committer GitHub <redacted>
Sun, 8 Dec 2024 11:14:54 +0000 (13:14 +0200)
* llama : use cmake for swift build

* swift : <> -> ""

* ci : remove make

* ci : disable ios build

* Revert "swift : <> -> """

This reverts commit d39ffd9556482b77d4ea5b118b453fc1c097a31d.

* ci : try fix ios build

* ci : cont

* ci : cont

---------

Co-authored-by: Georgi Gerganov <redacted>
.github/workflows/build.yml
Package.swift
Sources/llama/llama.h [new file with mode: 0644]
Sources/llama/module.modulemap [new file with mode: 0644]
cmake/llama.pc.in
examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index f3326a5fbab82a47dcdd248b1011bd2afb886144..886d33d2d5640172f10d86ddbde5644e743f14dd 100644
@@ -552,35 +552,44 @@ jobs:
             -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
           cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO
 
-# TODO: tmp disabled. see for possible re-enable:
-#       https://github.com/ggerganov/llama.cpp/pull/10525
-#  macOS-latest-swift:
-#    runs-on: macos-latest
-#
-#    strategy:
-#      matrix:
-#        destination: ['generic/platform=macOS', 'generic/platform=iOS', 'generic/platform=tvOS']
-#
-#    steps:
-#      - name: Clone
-#        id: checkout
-#        uses: actions/checkout@v4
-#
-#      - name: Dependencies
-#        id: depends
-#        continue-on-error: true
-#        run: |
-#          brew update
-#
-#      - name: xcodebuild for swift package
-#        id: xcodebuild
-#        run: |
-#          xcodebuild -scheme llama -destination "${{ matrix.destination }}"
-#
-#      - name: Build Swift Example
-#        id: make_build_swift_example
-#        run: |
-#            make swift
+  macOS-latest-swift:
+    runs-on: macos-latest
+
+    strategy:
+      matrix:
+        destination: ['generic/platform=macOS', 'generic/platform=iOS', 'generic/platform=tvOS']
+
+    steps:
+      - name: Clone
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Dependencies
+        id: depends
+        continue-on-error: true
+        run: |
+          brew update
+
+      - name: Build llama.cpp with CMake
+        id: cmake_build
+        run: |
+          sysctl -a
+          mkdir build
+          cd build
+          cmake -G Xcode .. \
+            -DGGML_METAL_USE_BF16=ON \
+            -DGGML_METAL_EMBED_LIBRARY=ON \
+            -DLLAMA_BUILD_EXAMPLES=OFF \
+            -DLLAMA_BUILD_TESTS=OFF \
+            -DLLAMA_BUILD_SERVER=OFF \
+            -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64"
+          cmake --build . --config Release -j $(sysctl -n hw.logicalcpu)
+          sudo cmake --install . --config Release
+
+      - name: xcodebuild for swift package
+        id: xcodebuild
+        run: |
+          xcodebuild -scheme llama-Package -destination "${{ matrix.destination }}"
 
   windows-msys2:
     runs-on: windows-latest
@@ -1104,6 +1113,29 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v4
 
+      - name: Build
+        id: cmake_build
+        run: |
+          sysctl -a
+          mkdir build
+          cd build
+          cmake -G Xcode .. \
+            -DGGML_METAL_USE_BF16=ON \
+            -DGGML_METAL_EMBED_LIBRARY=ON \
+            -DLLAMA_BUILD_EXAMPLES=OFF \
+            -DLLAMA_BUILD_TESTS=OFF \
+            -DLLAMA_BUILD_SERVER=OFF \
+            -DCMAKE_SYSTEM_NAME=iOS \
+            -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \
+            -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml
+          cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO
+          sudo cmake --install . --config Release
+
+      - name: xcodebuild for swift package
+        id: xcodebuild
+        run: |
+          xcodebuild -scheme llama-Package -destination 'generic/platform=iOS'
+
       - name: Build Xcode project
         run: xcodebuild -project examples/llama.swiftui/llama.swiftui.xcodeproj -scheme llama.swiftui -sdk iphoneos CODE_SIGNING_REQUIRED=NO CODE_SIGN_IDENTITY= -destination 'generic/platform=iOS' build
 
@@ -1131,23 +1163,6 @@ jobs:
 
           ./gradlew build --no-daemon
 
-#  freeBSD-latest:
-#    runs-on: macos-12
-#    steps:
-#    - name: Clone
-#      uses: actions/checkout@v4
-#
-#    - name: Build
-#      uses: cross-platform-actions/action@v0.19.0
-#      with:
-#        operating_system: freebsd
-#        version: '13.2'
-#        hypervisor: 'qemu'
-#        run: |
-#            sudo pkg update
-#            sudo pkg install -y gmake automake autoconf pkgconf llvm15 openblas
-#            gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j `sysctl -n hw.ncpu`
-
   release:
     if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }}
 
diff --git a/Package.swift b/Package.swift
index 3afeb2f1930e45f22c41048d7328525cdf5e1421..01c996d24203763dafc4bf7792306bef999be176 100644
@@ -2,60 +2,6 @@
 
 import PackageDescription
 
-var sources = [
-    "src/llama.cpp",
-    "src/llama-vocab.cpp",
-    "src/llama-grammar.cpp",
-    "src/llama-sampling.cpp",
-    "src/unicode.cpp",
-    "src/unicode-data.cpp",
-    "ggml/src/ggml.c",
-    "ggml/src/ggml-alloc.c",
-    "ggml/src/ggml-backend.cpp",
-    "ggml/src/ggml-backend-reg.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu.c",
-    "ggml/src/ggml-cpu/ggml-cpu.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu-hbm.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu-quants.c",
-    "ggml/src/ggml-cpu/ggml-cpu-traits.cpp",
-    "ggml/src/ggml-threading.cpp",
-    "ggml/src/ggml-quants.c",
-]
-
-var resources: [Resource] = []
-var linkerSettings: [LinkerSetting] = []
-var cSettings: [CSetting] =  [
-    .unsafeFlags(["-Wno-shorten-64-to-32", "-O3", "-DNDEBUG"]),
-    .unsafeFlags(["-fno-objc-arc"]),
-    .headerSearchPath("ggml/src"),
-    .headerSearchPath("ggml/src/ggml-cpu"),
-    // NOTE: NEW_LAPACK will required iOS version 16.4+
-    // We should consider add this in the future when we drop support for iOS 14
-    // (ref: ref: https://developer.apple.com/documentation/accelerate/1513264-cblas_sgemm?language=objc)
-    // .define("ACCELERATE_NEW_LAPACK"),
-    // .define("ACCELERATE_LAPACK_ILP64")
-    .define("GGML_USE_CPU"),
-]
-
-
-#if canImport(Darwin)
-sources.append("ggml/src/ggml-common.h")
-sources.append("ggml/src/ggml-metal/ggml-metal.m")
-resources.append(.process("ggml/src/ggml-metal/ggml-metal.metal"))
-linkerSettings.append(.linkedFramework("Accelerate"))
-cSettings.append(
-    contentsOf: [
-        .define("GGML_USE_ACCELERATE"),
-        .define("GGML_USE_METAL"),
-    ]
-)
-#endif
-
-#if os(Linux)
-    cSettings.append(.define("_GNU_SOURCE"))
-#endif
-
 let package = Package(
     name: "llama",
     platforms: [
@@ -68,26 +14,6 @@ let package = Package(
         .library(name: "llama", targets: ["llama"]),
     ],
     targets: [
-        .target(
-            name: "llama",
-            path: ".",
-            exclude: [
-               "build",
-               "cmake",
-               "examples",
-               "scripts",
-               "models",
-               "tests",
-               "CMakeLists.txt",
-               "Makefile",
-               "ggml/src/ggml-metal-embed.metal"
-            ],
-            sources: sources,
-            resources: resources,
-            publicHeadersPath: "spm-headers",
-            cSettings: cSettings,
-            linkerSettings: linkerSettings
-        )
-    ],
-    cxxLanguageStandard: .cxx17
+        .systemLibrary(name: "llama", pkgConfig: "llama"),
+    ]
 )
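
For reference, the net result of these two hunks is a much smaller manifest: the package no longer compiles any C/C++ sources itself and instead declares a single system-library target resolved through pkg-config. A sketch of the resulting file, reconstructed from the hunks above (the swift-tools-version line and the platforms block sit between the hunks and are unchanged, so they are elided here):

    import PackageDescription

    let package = Package(
        name: "llama",
        // platforms: [...]  (unchanged, elided between the hunks above)
        products: [
            .library(name: "llama", targets: ["llama"]),
        ],
        targets: [
            // no more source lists, cSettings, or linker flags:
            // the prebuilt, installed library is consumed via llama.pc
            .systemLibrary(name: "llama", pkgConfig: "llama"),
        ]
    )

Note that cxxLanguageStandard: .cxx17 disappears as well; with no sources left to compile, SwiftPM only needs the module map and the pkg-config metadata.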
diff --git a/Sources/llama/llama.h b/Sources/llama/llama.h
new file mode 100644
index 0000000..4172588
--- /dev/null
+++ b/Sources/llama/llama.h
@@ -0,0 +1,4 @@
+#pragma once
+
+#include <llama.h>
+
diff --git a/Sources/llama/module.modulemap b/Sources/llama/module.modulemap
new file mode 100644 (file)
index 0000000..d010555
--- /dev/null
+++ b/Sources/llama/module.modulemap
@@ -0,0 +1,5 @@
+module llama [system] {
+    header "llama.h"
+    link "llama"
+    export *
+}
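
With the system-library target in place, SwiftPM resolves `import llama` through the module map above and the installed llama.pc (see the pkg-config change below), so client code keeps calling the C API directly. A minimal consumption sketch, assuming llama.cpp was first built and installed with CMake as in the CI job above:

    import llama

    // the C API from the installed llama.h is visible as the `llama` module
    llama_backend_init()

    var mparams = llama_model_default_params()
    mparams.n_gpu_layers = 0   // CPU-only here, purely for illustration

    // ... load a model and create a context here ...

    llama_backend_free()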
diff --git a/cmake/llama.pc.in b/cmake/llama.pc.in
index 326acbb6108fd02e13787371d6dbefcd511c97f8..0b2b6bcfabfd1b5fe1ed8d00014726b600b5cfd3 100644
@@ -6,5 +6,5 @@ includedir=${prefix}/include
 Name: llama
 Description: Port of Facebook's LLaMA model in C/C++
 Version: @PROJECT_VERSION@
-Libs: -L${libdir} -lllama
+Libs: -L${libdir} -lggml  -lggml-base -lllama
 Cflags: -I${includedir}
diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
index 65cd4eb515c7f4b698e0abdfadf6a8c326a944da..998c673d5d31fa7937bbd458924ab749e75caf18 100644
@@ -210,20 +210,20 @@ actor LlamaContext {
 
             llama_kv_cache_clear(context)
 
-            let t_pp_start = ggml_time_us()
+            let t_pp_start = DispatchTime.now().uptimeNanoseconds / 1000;
 
             if llama_decode(context, batch) != 0 {
                 print("llama_decode() failed during prompt")
             }
             llama_synchronize(context)
 
-            let t_pp_end = ggml_time_us()
+            let t_pp_end = DispatchTime.now().uptimeNanoseconds / 1000;
 
             // bench text generation
 
             llama_kv_cache_clear(context)
 
-            let t_tg_start = ggml_time_us()
+            let t_tg_start = DispatchTime.now().uptimeNanoseconds / 1000;
 
             for i in 0..<tg {
                 llama_batch_clear(&batch)
@@ -238,7 +238,7 @@ actor LlamaContext {
                 llama_synchronize(context)
             }
 
-            let t_tg_end = ggml_time_us()
+            let t_tg_end = DispatchTime.now().uptimeNanoseconds / 1000;
 
             llama_kv_cache_clear(context)
 
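
The benchmark timestamps switch from ggml_time_us() to DispatchTime, since the Swift package no longer builds the ggml sources itself and only the llama.h API is exposed through the new system module. A hypothetical helper that factors out the repeated expression (not part of this change, shown only to make the unit explicit):

    import Dispatch

    // monotonic uptime in microseconds, a stand-in for ggml_time_us()
    func timeUs() -> UInt64 {
        return DispatchTime.now().uptimeNanoseconds / 1000
    }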
diff --git a/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj b/examples/llama.swiftui/llama.swiftui.xcodeproj/project.pbxproj
index 3950b9e9df843118dbde9944cd394693b08998e3..ff3d108b2a18c045fc7bb0556872b5b02660a716 100644
@@ -7,6 +7,7 @@
        objects = {
 
 /* Begin PBXBuildFile section */
+               1809696D2D05A39F00400EE8 /* llama in Frameworks */ = {isa = PBXBuildFile; productRef = 1809696C2D05A39F00400EE8 /* llama */; };
                549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 549479CA2AC9E16000E0F78B /* Metal.framework */; };
                79E1D9CD2B4CD16E005F8E46 /* InputButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = 79E1D9CC2B4CD16E005F8E46 /* InputButton.swift */; };
                7FA3D2B32B2EA2F600543F92 /* DownloadButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7FA3D2B22B2EA2F600543F92 /* DownloadButton.swift */; };
@@ -17,7 +18,6 @@
                8A3F84242AC4C891005E2EE8 /* models in Resources */ = {isa = PBXBuildFile; fileRef = 8A3F84232AC4C891005E2EE8 /* models */; };
                8A907F332AC7138A006146EA /* LibLlama.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A907F322AC7134E006146EA /* LibLlama.swift */; };
                8A9F7C4D2AC332EE008AE1EA /* LlamaState.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8A9F7C4C2AC332EE008AE1EA /* LlamaState.swift */; };
-               DF810E132B4A5BA200301144 /* llama in Frameworks */ = {isa = PBXBuildFile; productRef = DF810E122B4A5BA200301144 /* llama */; };
                F1FE20E22B465ECA00B45541 /* LoadCustomButton.swift in Sources */ = {isa = PBXBuildFile; fileRef = F1FE20E12B465EC900B45541 /* LoadCustomButton.swift */; };
 /* End PBXBuildFile section */
 
@@ -42,7 +42,7 @@
                        isa = PBXFrameworksBuildPhase;
                        buildActionMask = 2147483647;
                        files = (
-                               DF810E132B4A5BA200301144 /* llama in Frameworks */,
+                               1809696D2D05A39F00400EE8 /* llama in Frameworks */,
                                549479CB2AC9E16000E0F78B /* Metal.framework in Frameworks */,
                                8A39BE0A2AC7601100BFEB40 /* Accelerate.framework in Frameworks */,
                        );
@@ ... @@
                        );
                        name = llama.swiftui;
                        packageProductDependencies = (
-                               DF810E122B4A5BA200301144 /* llama */,
+                               1809696C2D05A39F00400EE8 /* llama */,
                        );
                        productName = llama.swiftui;
                        productReference = 8A1C83732AC328BD0096AF73 /* llama.swiftui.app */;
@@ ... @@
 /* End XCConfigurationList section */
 
 /* Begin XCSwiftPackageProductDependency section */
-               DF810E122B4A5BA200301144 /* llama */ = {
+               1809696C2D05A39F00400EE8 /* llama */ = {
                        isa = XCSwiftPackageProductDependency;
                        productName = llama;
                };