diff --git a/test/test_dtype.py b/test/test_dtype.py
index 5ea34fca..a83a224d 100644
--- a/test/test_dtype.py
+++ b/test/test_dtype.py
@@ -161,7 +161,25 @@ class TestHalfDtype(TestDType): DTYPE = dtypes.half
 
 class TestFloatDType(TestDType): DTYPE = dtypes.float
 
-class TestDoubleDtype(TestDType): DTYPE = dtypes.double
+class TestDoubleDtype(TestDType):
+  DTYPE = dtypes.double
+  @unittest.skipIf(getenv("CUDACPU",0)==1, "conversion not supported on CUDACPU")
+  @unittest.skipIf(getenv("HIP",0)==1, "HIP renderer does not support f64 precision")
+  def test_float64_increased_precision(self):
+    for func in [
+      lambda t: t.exp(),
+      lambda t: t.exp2(),
+      lambda t: t.log(),
+      lambda t: t.log2(),
+      lambda t: t.sqrt(),
+      lambda t: t.rsqrt(),
+      lambda t: t.sin(),
+      lambda t: t.cos(),
+      lambda t: t.tan(),
+      lambda t: t.sigmoid(),
+    ]:
+      a = [2, 3, 4]
+      np.testing.assert_allclose(func(Tensor(a, dtype=self.DTYPE)).numpy(), func(torch.tensor(a, dtype=torch.float64)), rtol=1e-12, atol=1e-12)
 
 class TestInt8Dtype(TestDType):
   DTYPE = dtypes.int8
diff --git a/tinygrad/renderer/cstyle.py b/tinygrad/renderer/cstyle.py
index 7b0c7026..4264a6e1 100644
--- a/tinygrad/renderer/cstyle.py
+++ b/tinygrad/renderer/cstyle.py
@@ -45,6 +45,7 @@ class CStyleLanguage(NamedTuple):
   def render_const(self, x:Union[float,int,bool], var_dtype) -> str:
     if math.isnan(x): val = "NAN"
     elif math.isinf(x): val = ("-" if x < 0 else "") + "INFINITY"
+    elif var_dtype == dtypes.float64: val = f"{float(x)}"
     else: val = f"{float(x)}f" if dtypes.is_float(var_dtype) else f"{int(x)}" if dtypes.is_int(var_dtype) else f"{bool(x)}".lower()
     return (self.render_cast([val]*var_dtype.count, var_dtype) if var_dtype.count > 1 or var_dtype not in [dtypes.float, dtypes.int, dtypes.bool] else val)
 
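
Note on the `render_const` change: in C, a floating-point literal with an `f` suffix (e.g. `0.1f`) is single precision, so emitting `f`-suffixed constants into an f64 kernel would silently round away the extra bits that `test_float64_increased_precision` asserts on at `rtol=1e-12`. A minimal standalone sketch of the suffix choice (simplified and hypothetical; the real method is `CStyleLanguage.render_const` and dispatches on tinygrad's `DType`):

```python
import math

# Sketch only: models the literal-suffix decision, not the full renderer.
def render_const_sketch(x: float, is_float64: bool) -> str:
  if math.isnan(x): return "NAN"
  if math.isinf(x): return ("-" if x < 0 else "") + "INFINITY"
  # f64 constants drop the trailing 'f' so the C compiler keeps them as
  # double literals instead of rounding them to single precision.
  return f"{float(x)}" if is_float64 else f"{float(x)}f"

assert render_const_sketch(0.1, is_float64=False) == "0.1f"  # float literal
assert render_const_sketch(0.1, is_float64=True) == "0.1"    # double literal
```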