#!/usr/bin/env python
import numpy as np
import unittest
from tinygrad import Tensor, Device, dtypes
from tinygrad.ops import UOps
from tinygrad.lazy import LazyBuffer, MetaOps
from tinygrad.engine.schedule import create_schedule

class TestLazyBuffer(unittest.TestCase):
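  # numpy arrays with arbitrary strides and offsets should round-trip through Tensor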
  def test_fromcpu_shape_tracker(self):
    def helper(a: np.ndarray):
      print(a.shape, a.strides, a.flags.c_contiguous)
      b = Tensor(a).lazydata
      #assert b.st.contiguous == a.flags.c_contiguous
      assert b.st.shape == a.shape
      np.testing.assert_equal(a, Tensor(b).numpy())

    for ndims in range(1, 4):
      a = np.random.randn(*(4,)*ndims).astype(np.float32)
      for stride in [-2, 1, 2]:
        for start in [0, 1]:
          helper(a[(slice(start, None, stride),)*ndims])
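
  # cat is implemented by adding padded tensors, so the shuffle_pad tests below check
  # that elementwise ops that don't map 0 -> 0 (cmpeq, div, log, exp) are not pushed
  # inside the pad, which would corrupt the padded region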
  def test_shuffle_pad_ops_cmpeq(self):
    y = Tensor([1]).cat(Tensor([1]) == 0).numpy()
    z = Tensor([1, 0]).numpy()
    np.testing.assert_allclose(y, z)

  def test_shuffle_pad_ops_div(self):
    y = Tensor([1]).cat(Tensor([1]).div(Tensor([2.0]))).numpy()
    z = Tensor([1, 0.5]).numpy()
    np.testing.assert_allclose(y, z)

  def test_shuffle_pad_ops_log(self):
    y = Tensor([1]).cat(Tensor([1]).log()).numpy()
    z = Tensor([1, 0]).numpy()
    np.testing.assert_allclose(y, z)

  def test_shuffle_pad_ops_exp(self):
    y = Tensor([1]).cat(Tensor([1]).exp()).numpy()
    z = Tensor([1, np.e]).numpy()
    np.testing.assert_allclose(y, z)
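
  # "{Device.DEFAULT}" and "{Device.DEFAULT}:0" should canonicalize to the same device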
  def test_device_0_is_the_same_device(self):
    a = Tensor([1, 2, 3], f"{Device.DEFAULT}")
    b = Tensor([1, 2, 3], f"{Device.DEFAULT}:0")
    assert a.device == b.device
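
  # shrinking a dim to (0,0) leaves a zero-size buffer; the cat along that dim below
  # only works if the (4, 0, 4) shapetracker is preserved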
  def test_shrink_const_into_zero(self):
    # regression test to make sure the shapetracker is preserved
    a = Tensor.zeros(4,4,4).shrink((None, (0,0), None))
    b = Tensor.zeros(4,1,4)
    c = a.cat(b, dim=1)
    np.testing.assert_allclose(c.numpy(), np.concatenate((a.numpy(), b.numpy()), axis=1))

  def test_shrink_const_then_cast(self):
    # regression test to make sure the shapetracker is preserved
    a = Tensor.zeros(4,4,4).shrink((None, (0,0), None)).cast(dtypes.int32)
    b = Tensor.zeros(4,1,4)
    c = a.cat(b, dim=1)
    np.testing.assert_allclose(c.numpy(), np.concatenate((a.numpy(), b.numpy()), axis=1))
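
  # the arg of a const should match its dtype: a python int for int dtypes, a float for floats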
  def test_const_dtype(self):
    lb: LazyBuffer = Tensor([1], dtype=dtypes.int).lazydata
    assert lb.const_like(1).base.arg == 1
    assert type(lb.const_like(1).base.arg) is int

    lb: LazyBuffer = Tensor([1], dtype=dtypes.float).lazydata
    assert lb.const_like(1).base.arg == 1.0
    assert type(lb.const_like(1).base.arg) is float
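
# a 4x4 sum fits in a single kernel, while larger reduces are split into a two-kernel
# (two-stage) reduction; in either case every scheduled kernel reduces via REDUCE_AXIS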
class TestReduceOp(unittest.TestCase):
  def test_no_split_reduce_kernel(self):
    a = Tensor.rand(4, 4).realize()
    a = a.sum()
    sched = create_schedule([a.lazydata])
    assert len(sched) == 1
    self.assertIs(sched[0].ast.src[0].src[2].op, UOps.REDUCE_AXIS)

  def test_split_reduce_kernel_dim0(self):
    a = Tensor.rand(256, 255).realize()
    a = a.sum()
    sched = create_schedule([a.lazydata])
    assert len(sched) == 2
    for s in sched:
      self.assertIs(s.ast.src[0].src[2].op, UOps.REDUCE_AXIS)

  def test_split_reduce_kernel_dim1(self):
    a = Tensor.rand(255, 256).realize()
    a = a.sum()
    sched = create_schedule([a.lazydata])
    assert len(sched) == 2
    for s in sched:
      self.assertIs(s.ast.src[0].src[2].op, UOps.REDUCE_AXIS)
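
# a pad followed by a shrink that keeps only the padded region reads no real data,
# so the lazy graph can fold the whole view into a CONST 0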
class TestView(unittest.TestCase):
  def test_all_masked_out(self):
    # start with a non-CONST MetaOps buffer
    a = Tensor.rand(10, 10)
    assert a.lazydata.base.op is not MetaOps.CONST

    # all masked out, degrades to const 0
    b = a.pad(((0, 10), None))[10:]
    assert b.shape == (10, 10)
    assert b.lazydata.base.op is MetaOps.CONST and b.lazydata.base.arg == 0

    # masking out dim=1 works too
    b = a.pad((None, (0, 10)))[:, 10:]
    assert b.shape == (10, 10)
    assert b.lazydata.base.op is MetaOps.CONST and b.lazydata.base.arg == 0

    # a partially masked out view does not degrade into CONST
    b = a.pad(((0, 5), None))[5:]
    assert b.shape == (10, 10)
    assert b.lazydata.base.op is not MetaOps.CONST

if __name__ == "__main__":
  unittest.main()