--- /dev/null
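+//! Exercises ggml's automatic differentiation: each block below builds a small
+//! expression graph, runs the forward and backward passes, and checks the
+//! computed value and gradients (plus Hessian-vector products in two blocks).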
+const std = @import("std");
+const c = @cImport({
+    @cInclude("stdio.h");
+    @cInclude("stdlib.h");
+    @cInclude("ggml/ggml.h");
+});
+
+pub fn main() !void {
+    // translate-c exposes the un-typedef'd C struct as `struct_ggml_init_params`;
+    // annotate the type so the literal coerces unambiguously.
+    const params = c.struct_ggml_init_params{
+        .mem_size = 128 * 1024 * 1024,
+        .mem_buffer = null,
+        .no_alloc = false,
+    };
+
+    const ctx0 = c.ggml_init(params);
+    defer c.ggml_free(ctx0);
+
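+    // Test 1: f(x) = a*x^2 on scalar tensors. With x = 2, a = 3: f = 12 and
+    // df/dx = 2*a*x = 12; re-running with x = 3 gives f = 27 and df/dx = 18.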
+    {
+        const x = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 1);
+
+        // Mark x as a trainable parameter so ggml allocates a gradient for it.
+        c.ggml_set_param(ctx0, x);
+
+        const a = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 1);
+        const b = c.ggml_mul(ctx0, x, x);
+        const f = c.ggml_mul(ctx0, b, a);
+
+        // f(x)  = a*x^2
+        // f'(x) = 2*a*x
+
+        c.ggml_print_objects(ctx0);
+
+        // The graphs must be mutable: ggml_graph_reset and ggml_graph_compute
+        // take non-const pointers, so use `var` rather than `const` plus
+        // @constCast (mutating a const local through @constCast is illegal
+        // behavior in Zig).
+        var gf = c.ggml_build_forward(f);
+        var gb = c.ggml_build_backward(ctx0, &gf, false);
+
+        _ = c.ggml_set_f32(x, 2.0);
+        _ = c.ggml_set_f32(a, 3.0);
+
+        c.ggml_graph_reset(&gf);
+        _ = c.ggml_set_f32(f.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gb);
+
+        std.debug.print("f = {d:.6}\n", .{c.ggml_get_f32_1d(f, 0)});
+        std.debug.print("df/dx = {d:.6}\n", .{c.ggml_get_f32_1d(x.*.grad, 0)});
+
+        try std.testing.expect(c.ggml_get_f32_1d(f, 0) == 12.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x.*.grad, 0) == 12.0);
+
+        _ = c.ggml_set_f32(x, 3.0);
+
+        c.ggml_graph_reset(&gf);
+        _ = c.ggml_set_f32(f.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gb);
+
+        std.debug.print("f = {d:.6}\n", .{c.ggml_get_f32_1d(f, 0)});
+        std.debug.print("df/dx = {d:.6}\n", .{c.ggml_get_f32_1d(x.*.grad, 0)});
+
+        try std.testing.expect(c.ggml_get_f32_1d(f, 0) == 27.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x.*.grad, 0) == 18.0);
+
+        c.ggml_graph_dump_dot(&gf, null, "test1-1-forward.dot");
+        c.ggml_graph_dump_dot(&gb, &gf, "test1-1-backward.dot");
+    }
+
+    ///////////////////////////////////////////////////////////////
+
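+    // Test 2: y = x1^2 + x1*x2 with x1 = 3, x2 = 1.
+    // Expected: y = 12, dy/dx1 = 2*x1 + x2 = 7, dy/dx2 = x1 = 3.
+    // Building a backward graph over gb then gives the Hessian-vector product
+    // H*[1, 1] = [3, 1], since the Hessian is [[2, 1], [1, 0]].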
+    {
+        const x1 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 1);
+        const x2 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 1);
+        const x3 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 1);
+
+        _ = c.ggml_set_f32(x1, 3.0);
+        _ = c.ggml_set_f32(x2, 1.0);
+        _ = c.ggml_set_f32(x3, 0.0);
+
+        // x3 is initialized but does not appear in y below.
+        c.ggml_set_param(ctx0, x1);
+        c.ggml_set_param(ctx0, x2);
+
+        const y = c.ggml_add(ctx0, c.ggml_mul(ctx0, x1, x1), c.ggml_mul(ctx0, x1, x2));
+
+        var gf = c.ggml_build_forward(y);
+        var gb = c.ggml_build_backward(ctx0, &gf, false);
+
+        c.ggml_graph_reset(&gf);
+        _ = c.ggml_set_f32(y.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gb);
+
+        std.debug.print("y = {d:.6}\n", .{c.ggml_get_f32_1d(y, 0)});
+        std.debug.print("df/dx1 = {d:.6}\n", .{c.ggml_get_f32_1d(x1.*.grad, 0)});
+        std.debug.print("df/dx2 = {d:.6}\n", .{c.ggml_get_f32_1d(x2.*.grad, 0)});
+
+        try std.testing.expect(c.ggml_get_f32_1d(y, 0) == 12.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 0) == 7.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 0) == 3.0);
+
+        const g1 = x1.*.grad;
+        const g2 = x2.*.grad;
+
+        // Differentiate the backward graph again to get second derivatives.
+        var gbb = c.ggml_build_backward(ctx0, &gb, true);
+
+        c.ggml_graph_reset(&gb);
+        _ = c.ggml_set_f32(g1.*.grad, 1.0);
+        _ = c.ggml_set_f32(g2.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gbb);
+
+        std.debug.print("H * [1, 1] = [ {d:.6} {d:.6} ]\n", .{ c.ggml_get_f32_1d(x1.*.grad, 0), c.ggml_get_f32_1d(x2.*.grad, 0) });
+
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 0) == 3.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 0) == 1.0);
+
+        c.ggml_graph_dump_dot(&gf, null, "test1-2-forward.dot");
+        c.ggml_graph_dump_dot(&gb, &gf, "test1-2-backward.dot");
+    }
+
+    ///////////////////////////////////////////////////////////////
+
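+    // Test 3: y = (x1^2 + x1*x2)*x1 = x1^3 + x1^2*x2 with x1 = 3, x2 = 4.
+    // Expected: y = 63, dy/dx1 = 3*x1^2 + 2*x1*x2 = 51, dy/dx2 = x1^2 = 9.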
+    {
+        const x1 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 1);
+        const x2 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 1);
+
+        c.ggml_set_param(ctx0, x1);
+        c.ggml_set_param(ctx0, x2);
+
+        const y = c.ggml_mul(ctx0, c.ggml_add(ctx0, c.ggml_mul(ctx0, x1, x1), c.ggml_mul(ctx0, x1, x2)), x1);
+
+        var gf = c.ggml_build_forward(y);
+        var gb = c.ggml_build_backward(ctx0, &gf, false);
+
+        _ = c.ggml_set_f32(x1, 3.0);
+        _ = c.ggml_set_f32(x2, 4.0);
+
+        c.ggml_graph_reset(&gf);
+        _ = c.ggml_set_f32(y.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gb);
+
+        std.debug.print("y = {d:.6}\n", .{c.ggml_get_f32_1d(y, 0)});
+        std.debug.print("df/dx1 = {d:.6}\n", .{c.ggml_get_f32_1d(x1.*.grad, 0)});
+        std.debug.print("df/dx2 = {d:.6}\n", .{c.ggml_get_f32_1d(x2.*.grad, 0)});
+
+        try std.testing.expect(c.ggml_get_f32_1d(y, 0) == 63.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 0) == 51.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 0) == 9.0);
+
+        c.ggml_graph_dump_dot(&gf, null, "test1-3-forward.dot");
+        c.ggml_graph_dump_dot(&gb, &gf, "test1-3-backward.dot");
+    }
+
+    ///////////////////////////////////////////////////////////////
+
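+    // Test 4: y = x1^2 * x2^2 * x3 with x1 = 1, x2 = 2, x3 = 3.
+    // Expected: y = 12, dy/dx1 = 24, dy/dx2 = 12, dy/dx3 = 4, and the
+    // Hessian-vector product H*[1, 1, 1] = [56, 34, 12].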
+    {
+        const x1 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 1);
+        const x2 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 1);
+        const x3 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 1);
+
+        c.ggml_set_param(ctx0, x1);
+        c.ggml_set_param(ctx0, x2);
+        c.ggml_set_param(ctx0, x3);
+
+        const y = c.ggml_mul(ctx0, c.ggml_mul(ctx0, c.ggml_mul(ctx0, x1, x1), c.ggml_mul(ctx0, x2, x2)), x3);
+
+        var gf = c.ggml_build_forward(y);
+        var gb = c.ggml_build_backward(ctx0, &gf, false);
+
+        _ = c.ggml_set_f32(x1, 1.0);
+        _ = c.ggml_set_f32(x2, 2.0);
+        _ = c.ggml_set_f32(x3, 3.0);
+
+        c.ggml_graph_reset(&gf);
+        _ = c.ggml_set_f32(y.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gb);
+
+        std.debug.print("y = {d:.6}\n", .{c.ggml_get_f32_1d(y, 0)});
+        std.debug.print("df/dx1 = {d:.6}\n", .{c.ggml_get_f32_1d(x1.*.grad, 0)});
+        std.debug.print("df/dx2 = {d:.6}\n", .{c.ggml_get_f32_1d(x2.*.grad, 0)});
+        std.debug.print("df/dx3 = {d:.6}\n", .{c.ggml_get_f32_1d(x3.*.grad, 0)});
+
+        try std.testing.expect(c.ggml_get_f32_1d(y, 0) == 12.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 0) == 24.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 0) == 12.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x3.*.grad, 0) == 4.0);
+
+        const g1 = x1.*.grad;
+        const g2 = x2.*.grad;
+        const g3 = x3.*.grad;
+
+        // Differentiate the backward graph again for second derivatives.
+        var gbb = c.ggml_build_backward(ctx0, &gb, true);
+
+        c.ggml_graph_reset(&gb);
+        _ = c.ggml_set_f32(g1.*.grad, 1.0);
+        _ = c.ggml_set_f32(g2.*.grad, 1.0);
+        _ = c.ggml_set_f32(g3.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gbb);
+
+        std.debug.print("H * [1, 1, 1] = [ {d:.6} {d:.6} {d:.6} ]\n", .{
+            c.ggml_get_f32_1d(x1.*.grad, 0),
+            c.ggml_get_f32_1d(x2.*.grad, 0),
+            c.ggml_get_f32_1d(x3.*.grad, 0),
+        });
+
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 0) == 56.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 0) == 34.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x3.*.grad, 0) == 12.0);
+
+        c.ggml_graph_dump_dot(&gf, null, "test1-4-forward.dot");
+        c.ggml_graph_dump_dot(&gb, &gf, "test1-4-backward.dot");
+    }
+
+    ///////////////////////////////////////////////////////////////
+
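+    // Test 5: y = sum(x1*x2), element-wise product of 3-element vectors,
+    // with x1 = [3, 3, 3] and x2 = [5, 5, 5].
+    // Expected: y = 45, dy/dx1 = x2 = 5 and dy/dx2 = x1 = 3 per element.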
+    {
+        const x1 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 3);
+        const x2 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 3);
+
+        c.ggml_set_param(ctx0, x1);
+        c.ggml_set_param(ctx0, x2);
+
+        const y = c.ggml_sum(ctx0, c.ggml_mul(ctx0, x1, x2));
+
+        var gf = c.ggml_build_forward(y);
+        var gb = c.ggml_build_backward(ctx0, &gf, false);
+
+        _ = c.ggml_set_f32(x1, 3.0);
+        _ = c.ggml_set_f32(x2, 5.0);
+
+        c.ggml_graph_reset(&gf);
+        _ = c.ggml_set_f32(y.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gb);
+
+        std.debug.print("y = {d:.6}\n", .{c.ggml_get_f32_1d(y, 0)});
+        std.debug.print("df/dx1 = {d:.6} {d:.6} {d:.6}\n", .{
+            c.ggml_get_f32_1d(x1.*.grad, 0),
+            c.ggml_get_f32_1d(x1.*.grad, 1),
+            c.ggml_get_f32_1d(x1.*.grad, 2),
+        });
+        std.debug.print("df/dx2 = {d:.6} {d:.6} {d:.6}\n", .{
+            c.ggml_get_f32_1d(x2.*.grad, 0),
+            c.ggml_get_f32_1d(x2.*.grad, 1),
+            c.ggml_get_f32_1d(x2.*.grad, 2),
+        });
+
+        try std.testing.expect(c.ggml_get_f32_1d(y, 0) == 45.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 0) == 5.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 0) == 3.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 1) == 5.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 1) == 3.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 2) == 5.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 2) == 3.0);
+
+        c.ggml_graph_dump_dot(&gf, null, "test1-5-forward.dot");
+        c.ggml_graph_dump_dot(&gb, &gf, "test1-5-backward.dot");
+    }
+
+    ///////////////////////////////////////////////////////////////
+
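+    // Test 6: y = sum(x1*x2 + (-2)*x1^2), element-wise over 3-vectors;
+    // ggml_repeat broadcasts the scalar -2 to the shape of x1. With x1 = 3 and
+    // x2 = 5 per element: y = -9, dy/dx1 = x2 - 4*x1 = -7, dy/dx2 = x1 = 3.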
+    {
+        const x1 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 3);
+        const x2 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 3);
+
+        c.ggml_set_param(ctx0, x1);
+        c.ggml_set_param(ctx0, x2);
+
+        const y =
+            c.ggml_sum(ctx0,
+                c.ggml_add(ctx0,
+                    c.ggml_mul(ctx0, x1, x2),
+                    c.ggml_mul(ctx0,
+                        c.ggml_repeat(ctx0, c.ggml_new_f32(ctx0, -2.0), x1),
+                        c.ggml_mul(ctx0, x1, x1)
+                    )
+                )
+            );
+
+        var gf = c.ggml_build_forward(y);
+        var gb = c.ggml_build_backward(ctx0, &gf, false);
+
+        _ = c.ggml_set_f32(x1, 3.0);
+        _ = c.ggml_set_f32(x2, 5.0);
+
+        c.ggml_graph_reset(&gf);
+        _ = c.ggml_set_f32(y.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gb);
+
+        std.debug.print("y = {d:.6}\n", .{c.ggml_get_f32_1d(y, 0)});
+        std.debug.print("df/dx1 = {d:.6} {d:.6} {d:.6}\n", .{
+            c.ggml_get_f32_1d(x1.*.grad, 0),
+            c.ggml_get_f32_1d(x1.*.grad, 1),
+            c.ggml_get_f32_1d(x1.*.grad, 2),
+        });
+        std.debug.print("df/dx2 = {d:.6} {d:.6} {d:.6}\n", .{
+            c.ggml_get_f32_1d(x2.*.grad, 0),
+            c.ggml_get_f32_1d(x2.*.grad, 1),
+            c.ggml_get_f32_1d(x2.*.grad, 2),
+        });
+
+        try std.testing.expect(c.ggml_get_f32_1d(y, 0) == -9.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 0) == -7.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 1) == -7.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 2) == -7.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 0) == 3.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 1) == 3.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 2) == 3.0);
+
+        c.ggml_graph_dump_dot(&gf, null, "test1-6-forward.dot");
+        c.ggml_graph_dump_dot(&gb, &gf, "test1-6-backward.dot");
+    }
+
+    ///////////////////////////////////////////////////////////////
+
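+    // Test 7: y = sum(x1*x2 - (-2)*x1^2) = sum(x1*x2 + 2*x1^2), element-wise.
+    // With x1 = 3, x2 = 5 per element: y = 99, dy/dx1 = x2 + 4*x1 = 17,
+    // dy/dx2 = x1 = 3.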
+    {
+        const x1 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 3);
+        const x2 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 3);
+
+        c.ggml_set_param(ctx0, x1);
+        c.ggml_set_param(ctx0, x2);
+
+        const y =
+            c.ggml_sum(ctx0,
+                c.ggml_sub(ctx0,
+                    c.ggml_mul(ctx0, x1, x2),
+                    c.ggml_mul(ctx0,
+                        c.ggml_mul(ctx0, x1, x1),
+                        c.ggml_repeat(ctx0, c.ggml_new_f32(ctx0, -2.0), x1)
+                    )
+                )
+            );
+
+        var gf = c.ggml_build_forward(y);
+        var gb = c.ggml_build_backward(ctx0, &gf, false);
+
+        _ = c.ggml_set_f32(x1, 3.0);
+        _ = c.ggml_set_f32(x2, 5.0);
+
+        c.ggml_graph_reset(&gf);
+        _ = c.ggml_set_f32(y.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gb);
+
+        std.debug.print("y = {d:.6}\n", .{c.ggml_get_f32_1d(y, 0)});
+        std.debug.print("df/dx1 = {d:.6} {d:.6} {d:.6}\n", .{
+            c.ggml_get_f32_1d(x1.*.grad, 0),
+            c.ggml_get_f32_1d(x1.*.grad, 1),
+            c.ggml_get_f32_1d(x1.*.grad, 2),
+        });
+        std.debug.print("df/dx2 = {d:.6} {d:.6} {d:.6}\n", .{
+            c.ggml_get_f32_1d(x2.*.grad, 0),
+            c.ggml_get_f32_1d(x2.*.grad, 1),
+            c.ggml_get_f32_1d(x2.*.grad, 2),
+        });
+
+        try std.testing.expect(c.ggml_get_f32_1d(y, 0) == 99.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 0) == 17.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 1) == 17.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 2) == 17.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 0) == 3.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 1) == 3.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 2) == 3.0);
+
+        c.ggml_graph_dump_dot(&gf, null, "test1-7-forward.dot");
+        c.ggml_graph_dump_dot(&gb, &gf, "test1-7-backward.dot");
+    }
+
+    ///////////////////////////////////////////////////////////////
+
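+    // Test 8: y = |x1 - x2|, element-wise (no reduction; y has 3 elements).
+    // d|u|/du is the sign of u, so with x1 = 3, x2 = 5 the gradients are
+    // dy/dx1 = -1 and dy/dx2 = +1 per element; setting x1 = 7 flips both signs.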
+    {
+        const x1 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 3);
+        const x2 = c.ggml_new_tensor_1d(ctx0, c.GGML_TYPE_F32, 3);
+
+        c.ggml_set_param(ctx0, x1);
+        c.ggml_set_param(ctx0, x2);
+
+        const y =
+            c.ggml_abs(ctx0,
+                c.ggml_sub(ctx0, x1, x2)
+            );
+
+        var gf = c.ggml_build_forward(y);
+        var gb = c.ggml_build_backward(ctx0, &gf, false);
+
+        _ = c.ggml_set_f32(x1, 3.0);
+        _ = c.ggml_set_f32(x2, 5.0);
+
+        c.ggml_graph_reset(&gf);
+        _ = c.ggml_set_f32(y.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gb);
+
+        std.debug.print("y = {d:.6}\n", .{c.ggml_get_f32_1d(y, 0)});
+        std.debug.print("df/dx1 = {d:.6} {d:.6} {d:.6}\n", .{
+            c.ggml_get_f32_1d(x1.*.grad, 0),
+            c.ggml_get_f32_1d(x1.*.grad, 1),
+            c.ggml_get_f32_1d(x1.*.grad, 2),
+        });
+        std.debug.print("df/dx2 = {d:.6} {d:.6} {d:.6}\n", .{
+            c.ggml_get_f32_1d(x2.*.grad, 0),
+            c.ggml_get_f32_1d(x2.*.grad, 1),
+            c.ggml_get_f32_1d(x2.*.grad, 2),
+        });
+
+        try std.testing.expect(c.ggml_get_f32_1d(y, 0) == 2.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 0) == -1.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 1) == -1.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 2) == -1.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 0) == 1.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 1) == 1.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 2) == 1.0);
+
+        // Flip the sign of x1 - x2: the gradients change sign with it.
+        _ = c.ggml_set_f32(x1, 7.0);
+        _ = c.ggml_set_f32(x2, 5.0);
+
+        c.ggml_graph_reset(&gf);
+        _ = c.ggml_set_f32(y.*.grad, 1.0);
+
+        c.ggml_graph_compute(ctx0, &gb);
+
+        std.debug.print("y = {d:.6}\n", .{c.ggml_get_f32_1d(y, 0)});
+        std.debug.print("df/dx1 = {d:.6} {d:.6} {d:.6}\n", .{
+            c.ggml_get_f32_1d(x1.*.grad, 0),
+            c.ggml_get_f32_1d(x1.*.grad, 1),
+            c.ggml_get_f32_1d(x1.*.grad, 2),
+        });
+        std.debug.print("df/dx2 = {d:.6} {d:.6} {d:.6}\n", .{
+            c.ggml_get_f32_1d(x2.*.grad, 0),
+            c.ggml_get_f32_1d(x2.*.grad, 1),
+            c.ggml_get_f32_1d(x2.*.grad, 2),
+        });
+
+        try std.testing.expect(c.ggml_get_f32_1d(y, 0) == 2.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 0) == 1.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 1) == 1.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x1.*.grad, 2) == 1.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 0) == -1.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 1) == -1.0);
+        try std.testing.expect(c.ggml_get_f32_1d(x2.*.grad, 2) == -1.0);
+
+        c.ggml_graph_dump_dot(&gf, null, "test1-8-forward.dot");
+        c.ggml_graph_dump_dot(&gb, &gf, "test1-8-backward.dot");
+    }
+
+    // Wait for a keypress before exiting so the printed results stay visible.
+    _ = try std.io.getStdIn().reader().readByte();
+}