mirror of https://github.com/commaai/tinygrad.git
qcom remove some tests skips (#6353)
This commit is contained in:
parent e8d1a37ecd
commit 326a77336e
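For context before the diff: every changed line below is a unittest.skipIf decorator gating a TestOps case on tinygrad's Device.DEFAULT. A minimal sketch of that pattern, assuming tinygrad's top-level Device and Tensor exports (the test class and tensor values here are illustrative, not from the commit):

# sketch of the backend-conditional skip pattern edited throughout this diff;
# Device.DEFAULT is the name of the active tinygrad backend, e.g. "QCOM" or "GPU"
import unittest
from tinygrad import Device, Tensor

class ExampleSkipPattern(unittest.TestCase):
  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU(qcom)/QCOM backends)")
  def test_tril(self):
    # tril() keeps the lower triangle (diagonal included) and zeroes the rest
    out = Tensor([[1., 2.], [3., 4.]]).tril().numpy()
    assert out[0, 1] == 0.0 and out[1, 0] == 3.0

if __name__ == "__main__":
  unittest.main()

When Device.DEFAULT is "QCOM" the case is reported as skipped with the given reason; on any other backend it runs normally. Removing the decorator, as this commit does in several places, puts the test back into the QCOM run.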
@@ -344,7 +344,7 @@ class TestOps(unittest.TestCase):
     helper_test_op([(45,35), (45,35), (45,35)], lambda x,y,z: x.lerp(y,z))
     helper_test_op(None, lambda x,y,z: x.lerp(y,z), vals=[[1.,2.,3.], [4.,5.,6.], 0.5])
 
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU/QCOM backends)")
+  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU(qcom)/QCOM backends)")
   def test_tril(self):
     helper_test_op([(3,3)], lambda x: x.tril())
     helper_test_op([(3,3)], lambda x: x.tril(1))
@@ -362,7 +362,7 @@ class TestOps(unittest.TestCase):
     helper_test_op([(5,3,3)], lambda x: x.tril(1))
     helper_test_op(None, lambda x: x.tril(), vals=[[[True] * 3] * 3], forward_only=True)
 
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU/QCOM backends)")
+  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU(qcom)/QCOM backends)")
   def test_triu(self):
     helper_test_op([(3,3)], lambda x: x.triu())
     helper_test_op([(3,3)], lambda x: x.triu(1))
@ -671,7 +671,6 @@ class TestOps(unittest.TestCase):
|
|||
|
||||
def test_small_cumsum(self):
|
||||
helper_test_op([(10)], lambda x: torch.cumsum(x, dim=0), lambda x: Tensor.cumsum(x, axis=0))
|
||||
@unittest.skipIf(Device.DEFAULT == "QCOM", "Qualcomm fails on this (both on GPU/QCOM backends)")
|
||||
def test_simple_cumsum(self):
|
||||
helper_test_op([(512)], lambda x: torch.cumsum(x, dim=0), lambda x: Tensor.cumsum(x, axis=0))
|
||||
helper_test_op([(1022)], lambda x: torch.cumsum(x, dim=0), lambda x: Tensor.cumsum(x, axis=0))
|
||||
|
@@ -862,7 +861,6 @@ class TestOps(unittest.TestCase):
   def test_small_gemm_eye(self):
     helper_test_op(None, lambda x,y: x.matmul(y), lambda x,y: x@y, vals=[np.eye(8).astype(np.float32), np.eye(8).astype(np.float32)])
   @unittest.skipIf(CI and Device.DEFAULT in ["NV", "LLVM", "GPU", "CUDA"], "not supported on these in CI")
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "Qualcomm fails on this (both on GPU/QCOM backends)")
   def test_gemm_fp16(self):
     helper_test_op([(64,64), (64,64)], lambda x,y: x.half().matmul(y.half()), atol=5e-3, rtol=5e-3)
   def test_gemm(self):
@@ -939,31 +937,27 @@ class TestOps(unittest.TestCase):
     helper_test_op([(3,4,5,6)], lambda x: x.max(axis=1)[0], lambda x: x.max(axis=1))
     helper_test_op([()], lambda x: x.max())
 
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU/QCOM backends)")
+  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU(qcom)/QCOM backends)")
   def test_any(self):
     helper_test_op([(3,4,5,6)], lambda x: x.any(), forward_only=True)
     helper_test_op(None, lambda x: x.any(), vals=[[True, True]], forward_only=True)
     helper_test_op(None, lambda x: x.any(), vals=[[True, False]], forward_only=True)
     helper_test_op(None, lambda x: x.any(), vals=[[False, False]], forward_only=True)
     helper_test_op([()], lambda x: x.any(), forward_only=True)
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU/QCOM backends)")
   def test_any_axis(self):
     helper_test_op([(3,4,5,6)], lambda x: x.any(axis=(1,2)), forward_only=True)
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU/QCOM backends)")
   def test_any_zero_axis(self):
     helper_test_op([(1,0,3,0,5)], lambda x: x.any(axis=(1,3)), forward_only=True)
 
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU/QCOM backends)")
+  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU(qcom)/QCOM backends)")
   def test_all(self):
     helper_test_op([(3,4,5,6)], lambda x: x.all(), forward_only=True)
     helper_test_op(None, lambda x: x.all(), vals=[[True, True]], forward_only=True)
     helper_test_op(None, lambda x: x.all(), vals=[[True, False]], forward_only=True)
     helper_test_op(None, lambda x: x.all(), vals=[[False, False]], forward_only=True)
     helper_test_op([()], lambda x: x.all(), forward_only=True)
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU/QCOM backends)")
   def test_all_axis(self):
     helper_test_op([(3,4,5,6)], lambda x: x.all(axis=(1,2)), forward_only=True)
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU/QCOM backends)")
   def test_all_zero_axis(self):
     helper_test_op([(1,0,3,0,5)], lambda x: x.all(axis=(1,3)), forward_only=True)
 
@@ -1438,7 +1432,6 @@ class TestOps(unittest.TestCase):
       lambda x,w: Tensor.conv2d(x,w).relu(), grad_rtol=1e-5)
 
   @unittest.skipIf(IMAGE>0, "no conv3d on images")
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "Qualcomm fails on this (both on GPU/QCOM backends)")
   def test_padded_conv3d(self):
     helper_test_op([(1,4,5,5,5), (4,4,3,3,3)],
       lambda x,w: torch.nn.functional.conv3d(x,w,padding=1).relu(),
@@ -1459,7 +1452,6 @@ class TestOps(unittest.TestCase):
       lambda x,w: torch.nn.functional.conv2d(x,w).relu(),
       lambda x,w: Tensor.conv2d(x,w).relu(), grad_rtol=1e-5)
 
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "Qualcomm fails on this (both on GPU/QCOM backends)")
   def test_nested_conv2d(self):
     helper_test_op([(1,32,9,9), (32,32,3,3), (32,32,3,3)],
       lambda x,w1,w2: torch.nn.functional.conv2d(torch.nn.functional.conv2d(x,w1).relu(), w2).relu(),
@@ -1682,14 +1674,12 @@ class TestOps(unittest.TestCase):
       lambda x,w: torch.nn.functional.conv2d(torch.nn.functional.pad(x, p),w).relu(),
       lambda x,w: Tensor.conv2d(x,w,padding=p).relu())
 
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "Qualcomm fails on this (both on GPU/QCOM backends)")
   def test_padded_conv2d_p21(self):
     bs,cin,H,W,padding = 4, 3, 3, 3, (2,1)
     helper_test_op([(bs,cin,11,28), (4,cin,H,W)],
       lambda x,w: torch.nn.functional.conv2d(x,w,padding=padding).relu(),
       lambda x,w: Tensor.conv2d(x,w,padding=padding).relu())
 
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "Qualcomm fails on this (both on GPU/QCOM backends)")
   def test_padded_conv2d_p22(self):
     bs,cin,H,W,padding = 4, 3, 3, 3, (2,2)
     helper_test_op([(bs,cin,11,28), (4,cin,H,W)],
@@ -1869,7 +1859,6 @@ class TestOps(unittest.TestCase):
       lambda x: torch.nn.functional.interpolate(x, size=out_sz, mode="trilinear", align_corners=True),
       lambda x: Tensor.interpolate(x, size=out_sz, mode="linear", align_corners=True), atol=1e-4)
 
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU/QCOM backends)")
   def test_cat(self):
     for dim in range(-2, 3):
       helper_test_op([(45,65,9), (45,65,9), (45,65,9)], lambda x,y,z: torch.cat((x,y,z), dim), lambda x,y,z: x.cat(y, z, dim=dim))
@@ -2120,7 +2109,7 @@ class TestOps(unittest.TestCase):
     helper_test_op([(32,10)], lambda x: x.masked_fill((x>0.1).detach(), -math.inf))
     helper_test_op([(32,10)], lambda x: x.masked_fill((x<0.1).detach(), -math.inf))
 
-  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU/QCOM backends)")
+  @unittest.skipIf(Device.DEFAULT == "QCOM", "OpenCL fails to compile this (both on GPU(qcom)/QCOM backends)")
   def test_cast(self):
     helper_test_op([(3, 3)], lambda x: x.float())
     helper_test_op(None, lambda x: x.float(), vals=[[0, 1, 2, 3]], forward_only=True)
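For context on the helper_test_op calls appearing in every hunk above: a rough, simplified sketch of what the real helper in tinygrad's test suite does (the actual version also checks backward passes, dtypes, and per-op tolerances; helper_test_op_sketch and its defaults here are hypothetical):

import numpy as np
import torch
from tinygrad import Tensor

def helper_test_op_sketch(shapes, torch_fxn, tinygrad_fxn=None, atol=1e-6, rtol=1e-3):
  # build identical random inputs, run both frameworks, compare forward outputs
  if tinygrad_fxn is None: tinygrad_fxn = torch_fxn
  np_inputs = [np.random.randn(*s).astype(np.float32) for s in shapes]
  torch_out = torch_fxn(*[torch.tensor(x) for x in np_inputs]).detach().numpy()
  tiny_out = tinygrad_fxn(*[Tensor(x) for x in np_inputs]).numpy()
  np.testing.assert_allclose(torch_out, tiny_out, atol=atol, rtol=rtol)

# e.g. the cumsum case whose QCOM skip is removed above:
helper_test_op_sketch([(512,)], lambda x: torch.cumsum(x, dim=0), lambda x: Tensor.cumsum(x, axis=0))

Once a skip decorator is deleted, the corresponding case runs through this comparison whenever the suite executes with the QCOM backend selected.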