diff --git a/test/test_dtype.py b/test/test_dtype.py
index 689be1d7..0240d134 100644
--- a/test/test_dtype.py
+++ b/test/test_dtype.py
@@ -189,7 +189,7 @@ class TestBFloat16DTypeCast(unittest.TestCase):
     converted = random_values.cast(dtypes.bfloat16).cast(dtypes.float32)
     np.testing.assert_allclose(converted.numpy(), random_values.cast(dtypes.float32).numpy(), rtol=1e-2, atol=1e-3)

-class TestHalfDtype(TestDType): DTYPE = dtypes.half
+class TestHalfDType(TestDType): DTYPE = dtypes.half

 class TestFloatDType(TestDType):
   DTYPE = dtypes.float
@@ -198,7 +198,7 @@ class TestFloatDType(TestDType):
     _test_op(lambda: Tensor([-0.9, -0.3, 1.2], dtype=dtypes.float32).cast(dtypes.uint32), dtypes.uint32, [0, 0, 1])

-class TestDoubleDtype(TestDType):
+class TestDoubleDType(TestDType):
   DTYPE = dtypes.double
   @unittest.skipIf((CI and Device.DEFAULT in {"CUDA", "NV"}) or getenv("PTX"), "conversion not supported on CUDACPU and PTX") # TODO: why not?
   def test_float64_increased_precision(self):
@@ -222,7 +222,7 @@ class TestDoubleDtype(TestDType):
       dtypes.float32, [float('inf'), 3.4e38, 1, 0])

-class TestInt8Dtype(TestDType):
+class TestInt8DType(TestDType):
   DTYPE = dtypes.int8
   @unittest.skipIf(getenv("CUDA",0)==1 or getenv("PTX", 0)==1, "cuda saturation works differently")
   def test_int8_to_uint8_negative(self):
@@ -231,7 +231,7 @@ class TestInt8Dtype(TestDType):
   def test_int8_to_uint16_negative(self):
     _test_op(lambda: Tensor([-1, -2, -3, -4], dtype=dtypes.int8).cast(dtypes.uint16), dtypes.uint16, [2**16-1, 2**16-2, 2**16-3, 2**16-4])

-class TestUint8Dtype(TestDType):
+class TestUint8DType(TestDType):
   DTYPE = dtypes.uint8
   @unittest.skipIf(getenv("CUDA",0)==1 or getenv("PTX", 0)==1, "cuda saturation works differently")
   def test_uint8_to_int8_overflow(self):
@@ -253,21 +253,21 @@ class TestBitCast(unittest.TestCase):
     b = a.bitcast(dtypes.float32)
     assert b.numpy()[0,0] == 1.

-class TestInt16Dtype(TestDType): DTYPE = dtypes.int16
+class TestInt16DType(TestDType): DTYPE = dtypes.int16

-class TestUint16Dtype(TestDType):
+class TestUint16DType(TestDType):
   DTYPE = dtypes.uint16
   def test_uint16_to_int8_overflow(self):
     _test_op(lambda: Tensor([2**16-1, 2**16-2, 1, 0], dtype=dtypes.uint16).cast(dtypes.int8), dtypes.int8, [-1, -2, 1, 0])

-class TestInt32Dtype(TestDType): DTYPE = dtypes.int32
-class TestUint32Dtype(TestDType): DTYPE = dtypes.uint32
+class TestInt32DType(TestDType): DTYPE = dtypes.int32
+class TestUint32DType(TestDType): DTYPE = dtypes.uint32

-class TestInt64Dtype(TestDType): DTYPE = dtypes.int64
-class TestUint64Dtype(TestDType): DTYPE = dtypes.uint64
+class TestInt64DType(TestDType): DTYPE = dtypes.int64
+class TestUint64DType(TestDType): DTYPE = dtypes.uint64

-class TestBoolDtype(TestDType): DTYPE = dtypes.bool
+class TestBoolDType(TestDType): DTYPE = dtypes.bool

 class TestImageDType(unittest.TestCase):
   def test_image_scalar(self):