]> git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama.swiftui : fix infinite loop, output timings, buff UI (#4674)
authorPeter Sugihara <redacted>
Fri, 29 Dec 2023 13:58:56 +0000 (05:58 -0800)
committerGitHub <redacted>
Fri, 29 Dec 2023 13:58:56 +0000 (15:58 +0200)
* fix infinite loop

* slight UI simplification, clearer UX

* clearer UI text, add timings to completion log

examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift
examples/llama.swiftui/llama.swiftui/UI/ContentView.swift
examples/llama.swiftui/llama.swiftui/UI/DownloadButton.swift

index 464fb3277aa25ce7bab067c3ac41c9834ee54e23..66244382f5cbc88e6c4333181fcf41c4204e412e 100644 (file)
@@ -1,5 +1,7 @@
 import Foundation
 
+// To use this in your own project, add llama.cpp as a swift package dependency
+// and uncomment this import line.
 // import llama
 
 enum LlamaError: Error {
index 3393eb242f9388f1e6d0a01ba2afb219a4937a86..17cb5b9dde942bac7443baa01dbda6c36d88a66c 100644 (file)
@@ -4,6 +4,7 @@ import Foundation
 class LlamaState: ObservableObject {
     @Published var messageLog = ""
     @Published var cacheCleared = false
+    let NS_PER_S = 1_000_000_000.0
 
     private var llamaContext: LlamaContext?
     private var defaultModelUrl: URL? {
@@ -20,12 +21,12 @@ class LlamaState: ObservableObject {
     }
 
     func loadModel(modelUrl: URL?) throws {
-        messageLog += "Loading model...\n"
         if let modelUrl {
+            messageLog += "Loading model...\n"
             llamaContext = try LlamaContext.create_context(path: modelUrl.path())
             messageLog += "Loaded model \(modelUrl.lastPathComponent)\n"
         } else {
-            messageLog += "Could not locate model\n"
+            messageLog += "Load a model from the list below\n"
         }
     }
 
@@ -34,15 +35,29 @@ class LlamaState: ObservableObject {
             return
         }
 
+        let t_start = DispatchTime.now().uptimeNanoseconds
         await llamaContext.completion_init(text: text)
+        let t_heat_end = DispatchTime.now().uptimeNanoseconds
+        let t_heat = Double(t_heat_end - t_start) / NS_PER_S
+
         messageLog += "\(text)"
 
-        while await llamaContext.n_cur <= llamaContext.n_len {
+        while await llamaContext.n_cur < llamaContext.n_len {
             let result = await llamaContext.completion_loop()
             messageLog += "\(result)"
         }
+
+        let t_end = DispatchTime.now().uptimeNanoseconds
+        let t_generation = Double(t_end - t_heat_end) / NS_PER_S
+        let tokens_per_second = Double(await llamaContext.n_len) / t_generation
+
         await llamaContext.clear()
-        messageLog += "\n\ndone\n"
+        messageLog += """
+            \n
+            Done
+            Heat up took \(t_heat)s
+            Generated \(tokens_per_second) t/s\n
+            """
     }
 
     func bench() async {
@@ -56,10 +71,10 @@ class LlamaState: ObservableObject {
         messageLog += await llamaContext.model_info() + "\n"
 
         let t_start = DispatchTime.now().uptimeNanoseconds
-        await llamaContext.bench(pp: 8, tg: 4, pl: 1) // heat up
+        let _ = await llamaContext.bench(pp: 8, tg: 4, pl: 1) // heat up
         let t_end = DispatchTime.now().uptimeNanoseconds
 
-        let t_heat = Double(t_end - t_start) / 1_000_000_000.0
+        let t_heat = Double(t_end - t_start) / NS_PER_S
         messageLog += "Heat up time: \(t_heat) seconds, please wait...\n"
 
         // if more than 5 seconds, then we're probably running on a slow device
index c78f107b39e0ef792a510b7a13be0304a6920fda..147e0c63bd8dd84f3427d9582e0fc5cfd49d2762 100644 (file)
@@ -42,46 +42,27 @@ struct ContentView: View {
                 Button("Send") {
                     sendText()
                 }
-                .padding(8)
-                .background(Color.blue)
-                .foregroundColor(.white)
-                .cornerRadius(8)
 
                 Button("Bench") {
                     bench()
                 }
-                .padding(8)
-                .background(Color.blue)
-                .foregroundColor(.white)
-                .cornerRadius(8)
 
                 Button("Clear") {
                     clear()
                 }
-                .padding(8)
-                .background(Color.blue)
-                .foregroundColor(.white)
-                .cornerRadius(8)
 
                 Button("Copy") {
                     UIPasteboard.general.string = llamaState.messageLog
                 }
-                .padding(8)
-                .background(Color.blue)
-                .foregroundColor(.white)
-                .cornerRadius(8)
-            }
+            }.buttonStyle(.bordered)
 
-            VStack {
+            VStack(alignment: .leading) {
                 DownloadButton(
                     llamaState: llamaState,
                     modelName: "TinyLlama-1.1B (Q4_0, 0.6 GiB)",
                     modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q4_0.gguf?download=true",
                     filename: "tinyllama-1.1b-1t-openorca.Q4_0.gguf"
                 )
-                .font(.system(size: 12))
-                .padding(.top, 4)
-                .frame(maxWidth: .infinity, alignment: .leading)
 
                 DownloadButton(
                     llamaState: llamaState,
@@ -89,7 +70,6 @@ struct ContentView: View {
                     modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q8_0.gguf?download=true",
                     filename: "tinyllama-1.1b-1t-openorca.Q8_0.gguf"
                 )
-                .font(.system(size: 12))
 
                 DownloadButton(
                     llamaState: llamaState,
@@ -97,8 +77,6 @@ struct ContentView: View {
                     modelUrl: "https://huggingface.co/ggml-org/models/resolve/main/tinyllama-1.1b/ggml-model-f16.gguf?download=true",
                     filename: "tinyllama-1.1b-f16.gguf"
                 )
-                .font(.system(size: 12))
-                .frame(maxWidth: .infinity, alignment: .leading)
 
                 DownloadButton(
                     llamaState: llamaState,
@@ -106,7 +84,6 @@ struct ContentView: View {
                     modelUrl: "https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf?download=true",
                     filename: "phi-2-q4_0.gguf"
                 )
-                .font(.system(size: 12))
 
                 DownloadButton(
                     llamaState: llamaState,
@@ -114,8 +91,6 @@ struct ContentView: View {
                     modelUrl: "https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q8_0.gguf?download=true",
                     filename: "phi-2-q8_0.gguf"
                 )
-                .font(.system(size: 12))
-                .frame(maxWidth: .infinity, alignment: .leading)
 
                 DownloadButton(
                     llamaState: llamaState,
@@ -123,15 +98,15 @@ struct ContentView: View {
                     modelUrl: "https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF/resolve/main/mistral-7b-v0.1.Q4_0.gguf?download=true",
                     filename: "mistral-7b-v0.1.Q4_0.gguf"
                 )
-                .font(.system(size: 12))
 
                 Button("Clear downloaded models") {
                     ContentView.cleanupModelCaches()
                     llamaState.cacheCleared = true
                 }
-                .padding(8)
-                .font(.system(size: 12))
             }
+            .padding(.top, 4)
+            .font(.system(size: 12))
+            .frame(maxWidth: .infinity, alignment: .leading)
         }
         .padding()
     }
index 4bd75cb69283cd50e8ca5d345169568a54ebae1c..c9f322ca14e72581a7e2abb0604f70523db4bd74 100644 (file)
@@ -93,7 +93,7 @@ struct DownloadButton: View {
                         print("Error: \(err.localizedDescription)")
                     }
                 }) {
-                    Text("\(modelName) (Downloaded)")
+                    Text("Load \(modelName)")
                 }
             } else {
                 Text("Unknown status")