enable METAL tests now runner is M1 and no fast-math (#3523)

chenyu 2024-02-28 14:14:23 -05:00 committed by GitHub
parent 1136e2a82a
commit d89e3c4e08
3 changed files with 3 additions and 13 deletions

File 1 of 3:

@@ -168,10 +168,6 @@ if Device.DEFAULT == "METAL" or (OSX and Device.DEFAULT == "GPU"):
   backend_test.exclude('test_mish_cpu')
   backend_test.exclude('test_mish_expanded_cpu')
 
-if Device.DEFAULT == 'METAL':
-  # with default fast math enabled, padding -inf does not work
-  backend_test.exclude('test_MaxPool3d_stride_padding_cpu')
-
 # TODO: llvm has problems with inf
 if Device.DEFAULT in ['LLVM']:
   backend_test.exclude('test_isinf_cpu')
@@ -179,7 +175,7 @@ if Device.DEFAULT in ['LLVM']:
   backend_test.exclude('test_isinf_positive_cpu')
 
 # # TODO: problems with nan
-if Device.DEFAULT in ['LLVM', 'METAL']:
+if Device.DEFAULT in ['LLVM']:
   backend_test.exclude('test_isnan_float16_cpu')
   backend_test.exclude('test_isnan_cpu')
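Background on the exclusions being dropped (a sketch for context, not part of the commit): max-pooling kernels pad out-of-bounds positions with -inf so padding can never win the max, and Metal's default fast-math mode assumes no infs/NaNs, which is why test_MaxPool3d_stride_padding_cpu and the isnan tests were excluded on METAL. A minimal numpy illustration of the -inf padding idea, assuming a 1D window of size 3:

import numpy as np

# Out-of-bounds positions are padded with -inf so they never win the max.
# Under fast-math, -inf handling is undefined, so padded positions could leak
# into the result; that is the failure mode the excluded ONNX test guarded.
x = np.array([3.0, 1.0, 2.0])
padded = np.concatenate([[-np.inf], x, [-np.inf]])
windows = np.lib.stride_tricks.sliding_window_view(padded, 3)
print(windows.max(axis=1))  # [3. 3. 2.]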

File 2 of 3:

@@ -69,10 +69,7 @@ def universal_test_unary(a, dtype, op):
   tensor_value = out.numpy()
   numpy_value = op[1](np.array([a]).astype(dtype.np))
   if dtype in dtypes_float:
-    atol = 2 if (Device.DEFAULT == "METAL" or getenv("PTX")) and op[0] == Tensor.sin else 1e-3
-    rtol = 2 if Device.DEFAULT == "METAL" and op[0] == Tensor.sin else 1e-4 if dtype == dtypes.float32 else 1e-2
-    # exp and log and sin are approximations (in METAL, the default fast-math versions are less precise)
-    np.testing.assert_allclose(tensor_value, numpy_value, atol=atol, rtol=rtol)
+    np.testing.assert_allclose(tensor_value, numpy_value, atol=1e-3, rtol=1e-2)
   else: np.testing.assert_equal(tensor_value, numpy_value)
   if op[0] != Tensor.reciprocal: # reciprocal is not supported in most backends
     op = [x for x in ast.lazyops if x.op in UnaryOps][0]
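For context, the hunk above replaces the METAL/PTX-specific tolerance bump for Tensor.sin with a single atol=1e-3 / rtol=1e-2. A rough sketch (not the real test, which sweeps values, dtypes, and ops) of the comparison pattern universal_test_unary uses:

import numpy as np
from tinygrad import Tensor

# Run one unary op through tinygrad and check it against numpy within a fixed
# tolerance; 0.7 and sin are arbitrary stand-ins for the swept inputs and ops.
a = 0.7
tensor_value = Tensor([a]).sin().numpy()               # default float32
numpy_value = np.sin(np.array([a], dtype=np.float32))
np.testing.assert_allclose(tensor_value, numpy_value, atol=1e-3, rtol=1e-2)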

File 3 of 3:

@@ -351,12 +351,10 @@ class TestOps(unittest.TestCase):
     helper_test_op([()], lambda x: x/2)
     helper_test_op([()], lambda x: 2/x)
 
-  @unittest.skipIf(Device.DEFAULT in ["METAL", "WEBGPU"], "METAL has issues with -inf")
   def test_mul_naninf(self):
     helper_test_op([(45,65)], lambda x: x*math.inf)
     helper_test_op([(45,65)], lambda x: x*-math.inf)
     helper_test_op([(45,65)], lambda x: x*math.nan)
-  @unittest.skipIf(Device.DEFAULT in ["METAL", "WEBGPU"], "METAL has issues with -inf")
   def test_div_naninf(self):
     helper_test_op([(45,65)], lambda x: x/math.inf)
     helper_test_op([(45,65)], lambda x: x/-math.inf)
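The two tests above were previously skipped on METAL (and WEBGPU) because fast-math does not guarantee IEEE-754 behaviour for inf and nan. The semantics they expect, sketched with plain Python floats:

import math

# IEEE-754 results the naninf tests rely on: finite * inf is signed inf,
# anything involving nan is nan, and finite / inf is signed zero.
print(2.0 * math.inf)    # inf
print(2.0 * -math.inf)   # -inf
print(2.0 * math.nan)    # nan
print(2.0 / math.inf)    # 0.0
print(2.0 / -math.inf)   # -0.0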
@@ -474,8 +472,7 @@ class TestOps(unittest.TestCase):
   def test_gelu(self):
     helper_test_op([(45,65)], lambda x: torch.nn.functional.gelu(x, approximate="tanh"), Tensor.gelu)
-    if not (CI and Device.DEFAULT == "METAL"):
-      helper_test_op([(45,65)], lambda x: torch.nn.functional.gelu(x, approximate="tanh"), Tensor.gelu, low=300, high=303)
+    helper_test_op([(45,65)], lambda x: torch.nn.functional.gelu(x, approximate="tanh"), Tensor.gelu, low=300, high=303)
     helper_test_op([(45,65)], lambda x: torch.nn.functional.gelu(x, approximate="tanh"), Tensor.gelu, low=-300, high=-297)
 
   def test_quick_gelu(self):
     helper_test_op([(45,65)], lambda x: x * torch.sigmoid(1.702 * x), Tensor.quick_gelu)
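The low=300/high=303 case that is no longer gated on CI+METAL pushes very large values through the tanh-approximate gelu, where tanh saturates and gelu(x) should come back as roughly x (or roughly 0 for large negative x) instead of overflowing. A quick numeric check of that saturation, assuming the usual tanh approximation:

import math

# tanh-approximate gelu, as in torch.nn.functional.gelu(x, approximate="tanh")
def gelu_tanh(x: float) -> float:
  return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

print(gelu_tanh(300.0))   # 300.0: tanh saturates to +1, so gelu(x) -> x
print(gelu_tanh(-300.0))  # -0.0: tanh saturates to -1, so gelu(x) -> 0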