git.djapps.eu Git - pkg/ggml/sources/llama.cpp/commitdiff
zig : update build.zig (#872)
author: Judd <redacted>
Thu, 13 Apr 2023 13:43:22 +0000 (21:43 +0800)
committer: GitHub <redacted>
Thu, 13 Apr 2023 13:43:22 +0000 (16:43 +0300)
* update

* update readme

* minimize the changes.

---------

Co-authored-by: zjli2019 <redacted>
README.md
build.zig

index dbc088532b05ddcd42076367a9a5daf8c8021374..c0958ebd69cb0814323e729b6ee8364544efd869 100644 (file)
--- a/README.md
+++ b/README.md
@@ -149,21 +149,43 @@ https://user-images.githubusercontent.com/1991296/224442907-7693d4be-acaa-4e01-8
 
 ## Usage
 
-Here are the step for the LLaMA-7B model:
+Here are the steps for the LLaMA-7B model.
+
+### Get the Code
 
 ```bash
-# build this repo
 git clone https://github.com/ggerganov/llama.cpp
 cd llama.cpp
-make
+```
+
+### Build
+
+Note: For Windows, CMake or Zig can be used.
+
+1. Use `make`
+
+    ```bash
+    make
+    ```
 
-#For Windows and CMake, use the following command instead:
-cd <path_to_llama_folder>
-mkdir build
-cd build
-cmake ..
-cmake --build . --config Release
+1. Use CMake
 
+    ```bash
+    mkdir build
+    cd build
+    cmake ..
+    cmake --build . --config Release
+    ```
+
+1. Use Zig
+
+    ```bash
+    zig build -Drelease-fast
+    ```
+
+### Prepare Data & Run
+
+```bash
 # obtain the original LLaMA model weights and place them in ./models
 ls ./models
 65B 30B 13B 7B tokenizer_checklist.chk tokenizer.model
index defc2c3ad4434d84099f75000741684a8468aa51..306127ffe2a73f604acfad8615f0c833aec13b69 100644 (file)
--- a/build.zig
+++ b/build.zig
@@ -1,16 +1,14 @@
 const std = @import("std");
 
-pub fn build(b: *std.Build) void {
+pub fn build(b: *std.build.Builder) void {
     const target = b.standardTargetOptions(.{});
-    const optimize = b.standardOptimizeOption(.{});
+    const optimize = b.standardReleaseOptions();
     const want_lto = b.option(bool, "lto", "Want -fLTO");
 
-    const lib = b.addStaticLibrary(.{
-        .name = "llama",
-        .target = target,
-        .optimize = optimize,
-    });
+    const lib = b.addStaticLibrary("llama", null);
     lib.want_lto = want_lto;
+    lib.setTarget(target);
+    lib.setBuildMode(optimize);
     lib.linkLibCpp();
     lib.addIncludePath(".");
     lib.addIncludePath("examples");
@@ -44,16 +42,12 @@ pub fn build(b: *std.Build) void {
 fn build_example(comptime name: []const u8, args: anytype) *std.build.LibExeObjStep {
     const b = args.b;
     const lib = args.lib;
-    const target = args.target;
-    const optimize = args.optimize;
     const want_lto = args.want_lto;
 
-    const exe = b.addExecutable(.{
-        .name = name,
-        .target = target,
-        .optimize = optimize,
-    });
+    const exe = b.addExecutable(name, null);
     exe.want_lto = want_lto;
+    lib.setTarget(args.target);
+    lib.setBuildMode(args.optimize);
     exe.addIncludePath(".");
     exe.addIncludePath("examples");
     exe.addCSourceFiles(&.{