Removing METAL Skips as CI works (#2488)

* Test metal CI

* remove metal and CI restrictions

* enable dtype tests for metal ci
This commit is contained in:
Liam 2023-11-29 04:46:59 +01:00 committed by GitHub
parent 5588922884
commit cf0c9096a9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 2 additions and 6 deletions

View File

@@ -240,9 +240,8 @@ jobs:
key: downloads-cache-metal-${{ env.DOWNLOAD_CACHE_VERSION }} key: downloads-cache-metal-${{ env.DOWNLOAD_CACHE_VERSION }}
- name: Test LLaMA compile speed - name: Test LLaMA compile speed
run: PYTHONPATH="." METAL=1 python test/external/external_test_speed_llama.py run: PYTHONPATH="." METAL=1 python test/external/external_test_speed_llama.py
#- name: Run dtype test - name: Run dtype test
# run: DEBUG=4 METAL=1 python -m pytest -n=auto test/test_dtype.py run: DEBUG=4 METAL=1 python -m pytest -n=auto test/test_dtype.py
# dtype test has issues on test_half_to_int8
- name: Check Device.DEFAULT (METAL) and print some source - name: Check Device.DEFAULT (METAL) and print some source
run: | run: |
METAL=1 python -c "from tinygrad import Device; assert Device.DEFAULT == 'METAL', Device.DEFAULT" METAL=1 python -c "from tinygrad import Device; assert Device.DEFAULT == 'METAL', Device.DEFAULT"

View File

@@ -865,7 +865,6 @@ class TestOps(unittest.TestCase):
lambda x,w: torch.nn.functional.conv_transpose2d(x,w, stride=stride).relu(), lambda x,w: torch.nn.functional.conv_transpose2d(x,w, stride=stride).relu(),
lambda x,w: Tensor.conv_transpose2d(x,w,stride=stride).relu(), atol=1e-4, grad_rtol=1e-5) lambda x,w: Tensor.conv_transpose2d(x,w,stride=stride).relu(), atol=1e-4, grad_rtol=1e-5)
@unittest.skipIf(Device.DEFAULT == "METAL" and CI, "broken in METAL CI")
def test_output_padded_conv_transpose2d(self): def test_output_padded_conv_transpose2d(self):
for output_padding, stride in [((1,1), (2,3)), ((2,1), (3,2))]: for output_padding, stride in [((1,1), (2,3)), ((2,1), (3,2))]:
helper_test_op([(2,4,6,5), (4,4,3,3),(4,)], helper_test_op([(2,4,6,5), (4,4,3,3),(4,)],
@@ -1036,14 +1035,12 @@ class TestOps(unittest.TestCase):
lambda x,w: torch.nn.functional.conv2d(torch.nn.functional.pad(x, p),w).relu(), lambda x,w: torch.nn.functional.conv2d(torch.nn.functional.pad(x, p),w).relu(),
lambda x,w: Tensor.conv2d(x,w,padding=p).relu(), atol=1e-4) lambda x,w: Tensor.conv2d(x,w,padding=p).relu(), atol=1e-4)
@unittest.skipIf(Device.DEFAULT == "METAL" and CI, "broken in METAL CI")
def test_padded_conv2d_p21(self): def test_padded_conv2d_p21(self):
bs,cin,H,W,padding = 4, 3, 3, 3, (2,1) bs,cin,H,W,padding = 4, 3, 3, 3, (2,1)
helper_test_op([(bs,cin,11,28), (4,cin,H,W)], helper_test_op([(bs,cin,11,28), (4,cin,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,padding=padding).relu(), lambda x,w: torch.nn.functional.conv2d(x,w,padding=padding).relu(),
lambda x,w: Tensor.conv2d(x,w,padding=padding).relu(), atol=1e-4) lambda x,w: Tensor.conv2d(x,w,padding=padding).relu(), atol=1e-4)
@unittest.skipIf(Device.DEFAULT == "METAL" and CI, "broken in METAL CI")
def test_padded_conv2d_p22(self): def test_padded_conv2d_p22(self):
bs,cin,H,W,padding = 4, 3, 3, 3, (2,2) bs,cin,H,W,padding = 4, 3, 3, 3, (2,2)
helper_test_op([(bs,cin,11,28), (4,cin,H,W)], helper_test_op([(bs,cin,11,28), (4,cin,H,W)],