tinygrad/test/test_dtype.py

import unittest
import numpy as np
from tinygrad.helpers import getenv
from tinygrad.lazy import Device
from tinygrad.tensor import Tensor, dtypes

# for GPU, cl_khr_fp16 isn't supported
# for LLVM, it segfaults because it can't link to the casting function
@unittest.skipIf(getenv("CI", "") != "" and Device.DEFAULT in ["GPU", "LLVM"], "float16 broken in some CI backends")
class TestDtype(unittest.TestCase):
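  # a float16 Tensor should realize to a np.float16 numpy array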
  def test_half_to_np(self):
    a = Tensor([1,2,3,4], dtype=dtypes.float16)
    print(a)
    na = a.numpy()
    print(na, na.dtype, a.lazydata.realized)
    assert na.dtype == np.float16
    np.testing.assert_allclose(na, [1,2,3,4])

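  # elementwise ops on two float16 tensors should keep the float16 dtype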
  def test_half_add(self):
    a = Tensor([1,2,3,4], dtype=dtypes.float16)
    b = Tensor([1,2,3,4], dtype=dtypes.float16)
    c = a+b
    print(c.numpy())
    assert c.dtype == dtypes.float16
    np.testing.assert_allclose(c.numpy(), [2,4,6,8])

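  # .float() casts a float16 tensor up to float32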
  def test_upcast_float(self):
    # NOTE: there's no downcasting support
    a = Tensor([1,2,3,4], dtype=dtypes.float16).float()
    print(a)
    na = a.numpy()
    print(na, na.dtype)
    assert na.dtype == np.float32
    np.testing.assert_allclose(na, [1,2,3,4])

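  # adding a float16 tensor to a float32 tensor upcasts the result to float32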
  def test_half_add_upcast(self):
    a = Tensor([1,2,3,4], dtype=dtypes.float16)
    b = Tensor([1,2,3,4], dtype=dtypes.float32)
    c = a+b
    print(c.numpy())
    assert c.dtype == dtypes.float32
    np.testing.assert_allclose(c.numpy(), [2,4,6,8])

if __name__ == '__main__':
  unittest.main()