From: ochafik
Date: Mon, 14 Aug 2023 01:17:29 +0000 (+0100)
Subject: Added some tests to python example + fixed numpy on scalar tensors
X-Git-Tag: upstream/0.0.1642~1269^2~1
X-Git-Url: https://git.djapps.eu/?a=commitdiff_plain;h=548aa504e0c1690296525ce9c0fcf4b1f6563e1b;p=pkg%2Fggml%2Fsources%2Fggml

Added some tests to python example + fixed numpy on scalar tensors
---

diff --git a/examples/python/README.md b/examples/python/README.md
index 5510c706..480920f7 100644
--- a/examples/python/README.md
+++ b/examples/python/README.md
@@ -96,6 +96,14 @@ In fact, if you wanted to only generate bindings for the current version of the
 API=../../include/ggml/ggml.h python regenerate.py
 ```

+## Develop
+
+Run tests:
+
+```bash
+pytest
+```
+
 ### Alternatives

 This example's goal is to showcase [cffi](https://cffi.readthedocs.io/)-generated bindings that are trivial to use and update, but there are already alternatives in the wild:
diff --git a/examples/python/ggml/utils.py b/examples/python/ggml/utils.py
index 2e95798a..9fef6825 100644
--- a/examples/python/ggml/utils.py
+++ b/examples/python/ggml/utils.py
@@ -88,7 +88,8 @@ def numpy(tensor: ffi.CData, allow_copy: Union[bool, np.ndarray] = False, allow_
         raise NotImplementedError(f'Cannot convert {__describe(tensor)} to numpy')
     assert __is_contiguous(tensor), f"Cannot convert {__describe(tensor)} to numpy (support contiguous tensors only)"

-    array = np.frombuffer(ffi.buffer(lib.ggml_get_data(tensor), lib.ggml_nbytes(tensor)), dtype=dtype)
+    nbytes = lib.ggml_nelements(tensor) * lib.ggml_type_size(tensor.type)
+    array = np.frombuffer(ffi.buffer(lib.ggml_get_data(tensor), nbytes), dtype=dtype)
     array.shape = shape
     return array

@@ -122,7 +123,7 @@ def __dtype_to_type(dtype: np.dtype):
     elif dtype == np.int8: return lib.GGML_TYPE_I8
     else: raise ValueError(f"Unsupported dtype: {dtype}")

-def __describe(tensor: ffi.CType): return f'Tensor[{__type_name(tensor.type)}, {__get_shape(tensor)}]'
+def __describe(tensor: ffi.CType): return f'Tensor[{__type_name(__get_type(tensor))}, {__get_shape(tensor)}]'
 def __get_type(tensor: TensorLike): return __dtype_to_type(tensor.dtype) if isinstance(tensor, np.ndarray) else tensor.type
 def __get_shape(x: TensorLike): return x.shape if isinstance(x, np.ndarray) else tuple([x.ne[i] for i in range(x.n_dims)])
 def __get_strides(x: TensorLike): return x.strides if isinstance(x, np.ndarray) else tuple([x.nb[i] for i in range(x.n_dims)])
@@ -139,7 +140,7 @@ def __get_floats(tensor: TensorLike) -> ffi.CData:
         nelements = __get_nelements(tensor)
         floats = ffi.new('float[]', nelements)
         if type == lib.GGML_TYPE_F16:
-            lib.ggml_fp16_to_fp32_row(data, floats, nelements)
+            lib.ggml_fp16_to_fp32_row(ffi.cast('uint16_t*', data), floats, nelements)
         elif lib.ggml_is_quantized(type):
             qtype = lib.ggml_internal_get_type_traits(type)
             assert qtype.to_float, f"Type {__type_name(type)} is not supported by ggml"
@@ -155,7 +156,7 @@ def __set_floats(tensor: TensorLike, f32_data: ffi.CData) -> None:
     else:
         nelements = __get_nelements(tensor)
         if type == lib.GGML_TYPE_F16:
-            lib.ggml_fp32_to_fp16_row(f32_data, data, nelements)
+            lib.ggml_fp32_to_fp16_row(f32_data, ffi.cast('uint16_t*', data), nelements)
         elif lib.ggml_is_quantized(type):
             qtype = lib.ggml_internal_get_type_traits(type)
             assert qtype.from_float, f"Type {__type_name(type)} is not supported by ggml"
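For context on the `ggml_nbytes` → `ggml_nelements * ggml_type_size` change above: the previous byte count broke `numpy()` on scalar (1-element) tensors, which the new tests below exercise. A minimal sketch of that path, assuming the cffi bindings have been generated as described in the README:

```python
# Hypothetical usage sketch (not part of the commit): wrap a scalar tensor
# in a numpy view, the case this commit fixes.
import numpy as np

from ggml import lib
from ggml.utils import init, numpy

ctx = init(mem_size=10*1024*1024)   # scratch context, as in the tests below
t = lib.ggml_new_f32(ctx, 4.2)      # scalar (1-element) F32 tensor
arr = numpy(t)                      # zero-copy view over the tensor data
assert arr.shape == (1,)
assert np.allclose(arr, np.array([4.2], dtype=np.float32))
```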
diff --git a/examples/python/test_tensor.py b/examples/python/test_tensor.py
new file mode 100644
index 00000000..1a365fae
--- /dev/null
+++ b/examples/python/test_tensor.py
@@ -0,0 +1,258 @@
+import pytest
+from pytest import raises
+
+from ggml import lib, ffi
+from ggml.utils import init, copy, numpy
+import numpy as np
+import numpy.testing as npt
+
+@pytest.fixture()
+def ctx():
+    print("setup")
+    yield init(mem_size=10*1024*1024)
+    print("teardown")
+
+class TestNumPy:
+
+    # Single element
+
+    def test_set_get_single_i32(self, ctx):
+        i = lib.ggml_new_i32(ctx, 42)
+        assert lib.ggml_get_i32_1d(i, 0) == 42
+        assert numpy(i) == np.array([42], dtype=np.int32)
+
+    def test_set_get_single_f32(self, ctx):
+        i = lib.ggml_new_f32(ctx, 4.2)
+
+        epsilon = 0.000001 # float32 cannot represent 4.2 exactly, hence the tolerance
+        assert lib.ggml_get_f32_1d(i, 0) == pytest.approx(4.2, abs=epsilon)
+        assert numpy(i) == pytest.approx(np.array([4.2], dtype=np.float32), abs=epsilon)
+
+    def _test_copy_np_to_ggml(self, a: np.ndarray, t: ffi.CData):
+        a2 = a.copy() # Clone original
+        copy(a, t)
+        npt.assert_array_equal(numpy(t), a2)
+
+    # I32
+
+    def test_copy_np_to_ggml_1d_i32(self, ctx):
+        t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_I32, 10)
+        a = np.arange(10, dtype=np.int32)
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_2d_i32(self, ctx):
+        t = lib.ggml_new_tensor_2d(ctx, lib.GGML_TYPE_I32, 2, 3)
+        a = np.arange(2 * 3, dtype=np.int32).reshape((2, 3))
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_3d_i32(self, ctx):
+        t = lib.ggml_new_tensor_3d(ctx, lib.GGML_TYPE_I32, 2, 3, 4)
+        a = np.arange(2 * 3 * 4, dtype=np.int32).reshape((2, 3, 4))
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_4d_i32(self, ctx):
+        t = lib.ggml_new_tensor_4d(ctx, lib.GGML_TYPE_I32, 2, 3, 4, 5)
+        a = np.arange(2 * 3 * 4 * 5, dtype=np.int32).reshape((2, 3, 4, 5))
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_4d_n_i32(self, ctx):
+        dims = [2, 3, 4, 5] # GGML_MAX_DIMS is 4, going beyond would crash
+        pdims = ffi.new('int64_t[]', len(dims))
+        for i, d in enumerate(dims): pdims[i] = d
+        t = lib.ggml_new_tensor(ctx, lib.GGML_TYPE_I32, len(dims), pdims)
+        a = np.arange(np.prod(dims), dtype=np.int32).reshape(tuple(pdims))
+        self._test_copy_np_to_ggml(a, t)
+
+    # F32
+
+    def test_copy_np_to_ggml_1d_f32(self, ctx):
+        t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 10)
+        a = np.arange(10, dtype=np.float32)
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_2d_f32(self, ctx):
+        t = lib.ggml_new_tensor_2d(ctx, lib.GGML_TYPE_F32, 2, 3)
+        a = np.arange(2 * 3, dtype=np.float32).reshape((2, 3))
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_3d_f32(self, ctx):
+        t = lib.ggml_new_tensor_3d(ctx, lib.GGML_TYPE_F32, 2, 3, 4)
+        a = np.arange(2 * 3 * 4, dtype=np.float32).reshape((2, 3, 4))
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_4d_f32(self, ctx):
+        t = lib.ggml_new_tensor_4d(ctx, lib.GGML_TYPE_F32, 2, 3, 4, 5)
+        a = np.arange(2 * 3 * 4 * 5, dtype=np.float32).reshape((2, 3, 4, 5))
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_4d_n_f32(self, ctx):
+        dims = [2, 3, 4, 5] # GGML_MAX_DIMS is 4, going beyond would crash
+        pdims = ffi.new('int64_t[]', len(dims))
+        for i, d in enumerate(dims): pdims[i] = d
+        t = lib.ggml_new_tensor(ctx, lib.GGML_TYPE_F32, len(dims), pdims)
+        a = np.arange(np.prod(dims), dtype=np.float32).reshape(tuple(pdims))
+        self._test_copy_np_to_ggml(a, t)
+
+    # F16
+
+    def test_copy_np_to_ggml_1d_f16(self, ctx):
+        t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F16, 10)
+        a = np.arange(10, dtype=np.float16)
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_2d_f16(self, ctx):
+        t = lib.ggml_new_tensor_2d(ctx, lib.GGML_TYPE_F16, 2, 3)
+        a = np.arange(2 * 3, dtype=np.float16).reshape((2, 3))
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_3d_f16(self, ctx):
+        t = lib.ggml_new_tensor_3d(ctx, lib.GGML_TYPE_F16, 2, 3, 4)
+        a = np.arange(2 * 3 * 4, dtype=np.float16).reshape((2, 3, 4))
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_4d_f16(self, ctx):
+        t = lib.ggml_new_tensor_4d(ctx, lib.GGML_TYPE_F16, 2, 3, 4, 5)
+        a = np.arange(2 * 3 * 4 * 5, dtype=np.float16).reshape((2, 3, 4, 5))
+        self._test_copy_np_to_ggml(a, t)
+
+    def test_copy_np_to_ggml_4d_n_f16(self, ctx):
+        dims = [2, 3, 4, 5] # GGML_MAX_DIMS is 4, going beyond would crash
+        pdims = ffi.new('int64_t[]', len(dims))
+        for i, d in enumerate(dims): pdims[i] = d
+        t = lib.ggml_new_tensor(ctx, lib.GGML_TYPE_F16, len(dims), pdims)
+        a = np.arange(np.prod(dims), dtype=np.float16).reshape(tuple(pdims))
+        self._test_copy_np_to_ggml(a, t)
+
+    # Mismatching shapes
+
+    def test_copy_mismatching_shapes_1d(self, ctx):
+        t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 10)
+        a = np.arange(10, dtype=np.float32)
+        copy(a, t) # OK
+
+        a = a.reshape((5, 2))
+        with raises(AssertionError): copy(a, t)
+        with raises(AssertionError): copy(t, a)
+
+    def test_copy_mismatching_shapes_2d(self, ctx):
+        t = lib.ggml_new_tensor_2d(ctx, lib.GGML_TYPE_F32, 2, 3)
+        a = np.arange(6, dtype=np.float32)
+        copy(a.reshape((2, 3)), t) # OK
+
+        a = a.reshape((3, 2))
+        with raises(AssertionError): copy(a, t)
+        with raises(AssertionError): copy(t, a)
+
+    def test_copy_mismatching_shapes_3d(self, ctx):
+        t = lib.ggml_new_tensor_3d(ctx, lib.GGML_TYPE_F32, 2, 3, 4)
+        a = np.arange(24, dtype=np.float32)
+        copy(a.reshape((2, 3, 4)), t) # OK
+
+        a = a.reshape((2, 4, 3))
+        with raises(AssertionError): copy(a, t)
+        with raises(AssertionError): copy(t, a)
+
+    def test_copy_mismatching_shapes_4d(self, ctx):
+        t = lib.ggml_new_tensor_4d(ctx, lib.GGML_TYPE_F32, 2, 3, 4, 5)
+        a = np.arange(24*5, dtype=np.float32)
+        copy(a.reshape((2, 3, 4, 5)), t) # OK
+
+        a = a.reshape((2, 3, 5, 4))
+        with raises(AssertionError): copy(a, t)
+        with raises(AssertionError): copy(t, a)
+
+    def test_copy_f16_to_f32(self, ctx):
+        t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 1)
+        a = np.array([123.45], dtype=np.float16)
+        copy(a, t)
+        np.testing.assert_allclose(lib.ggml_get_f32_1d(t, 0), 123.45, rtol=1e-3)
+
+    def test_copy_f32_to_f16(self, ctx):
+        t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F16, 1)
+        a = np.array([123.45], dtype=np.float32)
+        copy(a, t)
+        np.testing.assert_allclose(lib.ggml_get_f32_1d(t, 0), 123.45, rtol=1e-3)
+
+    def test_copy_f16_to_Q5_K(self, ctx):
+        n = 256
+        t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_Q5_K, n)
+        a = np.arange(n, dtype=np.float16)
+        copy(a, t)
+        np.testing.assert_allclose(a, numpy(t, allow_copy=True), rtol=0.05)
+
+    def test_copy_Q5_K_to_f16(self, ctx):
+        n = 256
+        t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_Q5_K, n)
+        copy(np.arange(n, dtype=np.float32), t)
+        a = np.arange(n, dtype=np.float16)
+        copy(t, a)
+        np.testing.assert_allclose(a, numpy(t, allow_copy=True), rtol=0.05)
+
+    def test_copy_i16_f32_mismatching_types(self, ctx):
+        t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 1)
+        a = np.arange(1, dtype=np.int16)
+        with raises(NotImplementedError): copy(a, t)
+        with raises(NotImplementedError): copy(t, a)
+
+class TestTensorCopy:
+
+    def test_copy_self(self, ctx):
+        t = lib.ggml_new_i32(ctx, 42)
+        copy(t, t)
+        assert lib.ggml_get_i32_1d(t, 0) == 42
+
+    def test_copy_1d(self, ctx):
+        t1 = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 10)
+        t2 = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, 10)
+        a = np.arange(10, dtype=np.float32)
+        copy(a, t1)
+        copy(t1, t2)
+        assert np.allclose(a, numpy(t2))
+        assert np.allclose(numpy(t1), numpy(t2))
+
+class TestGraph:
+
+    def test_add(self, ctx):
+        n = 256
+        ta = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, n)
+        tb = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, n)
+        tsum = lib.ggml_add(ctx, ta, tb)
+        assert tsum.type == lib.GGML_TYPE_F32
+
+        gf = ffi.new('struct ggml_cgraph*')
+        lib.ggml_build_forward_expand(gf, tsum)
+
+        a = np.arange(0, n, dtype=np.float32)
+        b = np.arange(n, 0, -1, dtype=np.float32)
+        copy(a, ta)
+        copy(b, tb)
+
+        lib.ggml_graph_compute_with_ctx(ctx, gf, 1) # single-threaded compute
+
+        assert np.allclose(numpy(tsum, allow_copy=True), a + b)
+
+class TestQuantization:
+
+    def test_quantized_add(self, ctx):
+        n = 256
+        ta = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_Q5_K, n)
+        tb = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F32, n)
+        tsum = lib.ggml_add(ctx, ta, tb)
+        assert tsum.type == lib.GGML_TYPE_Q5_K
+
+        gf = ffi.new('struct ggml_cgraph*')
+        lib.ggml_build_forward_expand(gf, tsum)
+
+        a = np.arange(0, n, dtype=np.float32)
+        b = np.arange(n, 0, -1, dtype=np.float32)
+        copy(a, ta)
+        copy(b, tb)
+
+        lib.ggml_graph_compute_with_ctx(ctx, gf, 1)
+
+        unquantized_sum = a + b
+        quantized_sum = numpy(tsum, allow_copy=True)
+
+        diff = np.linalg.norm(unquantized_sum - quantized_sum, np.inf)
+        assert diff > 4 # Q5_K quantization must introduce some error...
+        assert diff < 5 # ...but it should stay small
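As a footnote to the tests, here is the f16 round trip that the `uint16_t*` casts in `ggml/utils.py` keep working; a minimal sketch, again assuming the bindings are built per the README:

```python
# Hypothetical usage sketch (not part of the commit): round-trip an
# np.float16 array through a GGML_TYPE_F16 tensor. copy() converts through
# fp32 internally via ggml_fp16_to_fp32_row / ggml_fp32_to_fp16_row.
import numpy as np

from ggml import lib
from ggml.utils import init, copy, numpy

ctx = init(mem_size=10*1024*1024)
t = lib.ggml_new_tensor_1d(ctx, lib.GGML_TYPE_F16, 8)
a = np.arange(8, dtype=np.float16)              # small integers: exact in fp16
copy(a, t)                                      # fp16 -> fp32 -> fp16 under the hood
np.testing.assert_array_equal(numpy(t), a)      # values survive the round trip
```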