git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
llama.swiftui : add more models
author Georgi Gerganov <redacted>
Mon, 18 Dec 2023 18:05:12 +0000 (20:05 +0200)
committer Georgi Gerganov <redacted>
Mon, 18 Dec 2023 18:05:12 +0000 (20:05 +0200)
examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
examples/llama.swiftui/llama.swiftui/UI/ContentView.swift

index 272e1fd8a224105a5884f42a0ef834a78d1d9c02..464fb3277aa25ce7bab067c3ac41c9834ee54e23 100644 (file)
@@ -203,7 +203,7 @@ actor LlamaContext {
         var pp_std: Double = 0
         var tg_std: Double = 0
 
-        for r in 0..<nr {
+        for _ in 0..<nr {
             // bench prompt processing
 
             llama_batch_clear(&batch)
index 219bf4dc19c28b5a75907082651aaec190282fce..9cbe8efd66d1f75bf2a6036d0373365b13077416 100644 (file)
@@ -75,21 +75,48 @@ struct ContentView: View {
             VStack {
                 DownloadButton(
                     llamaState: llamaState,
-                    modelName: "TinyLlama-1.1B (Q4_0)",
+                    modelName: "TinyLlama-1.1B (Q4_0, 0.6 GiB)",
                     modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q4_0.gguf?download=true",
                     filename: "tinyllama-1.1b-1t-openorca.Q4_0.gguf"
                 )
                 .font(.system(size: 12))
                 .padding(.top, 4)
+                .frame(maxWidth: .infinity, alignment: .leading)
 
                 DownloadButton(
                     llamaState: llamaState,
-                    modelName: "TinyLlama-1.1B (Q8_0)",
+                    modelName: "TinyLlama-1.1B (Q8_0, 1.1 GiB)",
                     modelUrl: "https://huggingface.co/TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF/resolve/main/tinyllama-1.1b-1t-openorca.Q8_0.gguf?download=true",
                     filename: "tinyllama-1.1b-1t-openorca.Q8_0.gguf"
                 )
                 .font(.system(size: 12))
 
+                DownloadButton(
+                    llamaState: llamaState,
+                    modelName: "Phi-2.7B (Q4_0, 1.6 GiB)",
+                    modelUrl: "https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf?download=true",
+                    filename: "phi-2-q4_0.gguf"
+                )
+                .font(.system(size: 12))
+                .frame(maxWidth: .infinity, alignment: .leading)
+
+                DownloadButton(
+                    llamaState: llamaState,
+                    modelName: "Phi-2.7B (Q8_0, 2.8 GiB)",
+                    modelUrl: "https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q8_0.gguf?download=true",
+                    filename: "phi-2-q8_0.gguf"
+                )
+                .font(.system(size: 12))
+
+                DownloadButton(
+                    llamaState: llamaState,
+                    modelName: "Mistral-7B-v0.1 (Q4_0, 3.8 GiB)",
+                    modelUrl: "https://huggingface.co/TheBloke/Mistral-7B-v0.1-GGUF/resolve/main/mistral-7b-v0.1.Q4_0.gguf?download=true",
+                    filename: "mistral-7b-v0.1.Q4_0.gguf"
+                )
+                .font(.system(size: 12))
+                .frame(maxWidth: .infinity, alignment: .leading)
+
                 Button("Clear downloaded models") {
                     ContentView.cleanupModelCaches()
                     llamaState.cacheCleared = true