update pytest marks and CI test filters (#2587)

* remove pytest marks

* test more stuff

* fine, revert some

* add that mark back

* skip that

* hmm, LLVM does not work on Ubuntu

* too slow on CUDA CI

* dup test
chenyu 2023-12-03 15:20:44 -05:00 committed by GitHub
parent 88a5c368d4
commit 1ac958a058
6 changed files with 5 additions and 17 deletions


@@ -411,13 +411,13 @@ jobs:
         DEBUG=5 PYTHONPATH=${{ github.workspace }} FORWARD_ONLY=1 python3 test/test_ops.py TestOps.test_add
     - name: Run pytest (not cuda)
       if: matrix.backend!='cuda' && matrix.backend!='ptx' && matrix.backend!='triton'
-      run: python -m pytest -n=auto test/ -k '${{matrix.backend=='llvm'&&'not (test_nn.py and test_conv_transpose2d)'||'test'}}' -m 'not exclude_${{matrix.backend}}' --durations=20
+      run: python -m pytest -n=auto test/ -m 'not exclude_${{matrix.backend}}' --durations=20
     - name: Run ONNX (only LLVM)
       if: matrix.backend == 'llvm'
       run: python -m pytest -n=auto test/external/external_test_onnx_backend.py --durations=20
     - name: Run pytest (cuda)
       if: matrix.backend=='cuda'||matrix.backend=='ptx'||matrix.backend=='triton'
-      run: python -m pytest -n=auto test/ -k 'not (half or test_efficientnet_safetensors) and not (test_conv2d and test_tensor.py)' -m 'not exclude_cuda' --ignore=test/external --ignore=test/models --durations=20
+      run: python -m pytest -n=auto test/ -k 'not (half or test_efficientnet_safetensors)' -m 'not exclude_cuda' --ignore=test/external --ignore=test/models --durations=20
     #testunicorn:
     #  name: ARM64 unicorn Test
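For context on the filters touched above: pytest's -k option matches an expression against test names and file paths, while -m matches against markers, so the non-CUDA CI line now relies solely on marks like exclude_cuda instead of a name pattern. A minimal sketch (hypothetical test names, not from this repo) of how the -m deselection behaves:

import pytest

@pytest.mark.exclude_cuda        # mark must be registered (see the markers list below)
def test_not_on_cuda():
    assert 1 + 1 == 2

def test_everywhere():
    assert True

# `pytest -m 'not exclude_cuda'` deselects test_not_on_cuda and runs test_everywhere;
# `pytest -k 'not test_not'` would get the same result here by name matching instead.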


@@ -3,5 +3,4 @@ markers =
     exclude_cuda
     exclude_gpu
     exclude_clang
-    webgpu
     onnx_coverage
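Marks used with -m must be declared in this markers block; otherwise pytest emits PytestUnknownMarkWarning (an error under --strict-markers). A sketch of the equivalent programmatic registration, with hypothetical descriptions, should the list ever need to be built dynamically:

# conftest.py — a sketch; same names as the ini block above
def pytest_configure(config):
    # the text after ':' is only a human-readable description
    for mark in ("exclude_cuda", "exclude_gpu", "exclude_clang", "onnx_coverage"):
        config.addinivalue_line("markers", f"{mark}: deselected in CI with -m 'not {mark}'")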


@@ -1,9 +1,6 @@
 import unittest
 import numpy as np
 from tinygrad.tensor import Tensor, Device
-import pytest
-pytestmark = [pytest.mark.exclude_cuda]
 class TestConv(unittest.TestCase):
   def test_simple(self):
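The deleted pytestmark line is pytest's module-level mark: every test collected from the file inherits each mark in the list, which is why removing it re-enables this whole file on CUDA CI. A small sketch of the equivalence (hypothetical tests):

import pytest

pytestmark = [pytest.mark.exclude_cuda]   # applies to every test in this module

class TestExample:
    def test_a(self): assert True          # behaves as if decorated with
    def test_b(self): assert True          # @pytest.mark.exclude_cuda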


@@ -11,9 +11,6 @@ from tinygrad.helpers import prod, dtypes
 from tinygrad.lazy import Buffer, create_lazybuffer
 from tinygrad.device import CompiledASTRunner, Device
 from tinygrad.shape.shapetracker import ShapeTracker
-import pytest
-pytestmark = pytest.mark.webgpu
 # we don't always have GPU support, so the type signature is the abstract CompiledBuffer instead of GPUBuffer
 def atan2_gpu(ret:Buffer, a:Buffer, b:Buffer):


@@ -4,12 +4,9 @@ import numpy as np
 import torch
 from tinygrad.tensor import Tensor
 import tinygrad.nn as nn
-import pytest
 from tinygrad.helpers import dtypes
 from functools import partial
-pytestmark = pytest.mark.webgpu
 # https://gist.github.com/devries/11405101
 def ksprob(a):
   fac, total, termbf = 2.0, 0.0, 0.0
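The truncated ksprob above comes from the linked gist and computes the Kolmogorov-Smirnov tail probability Q_KS(a) = 2 * sum_{j>=1} (-1)^(j-1) * exp(-2 j^2 a^2). A sketch of the usual term-by-term series evaluation, assuming the gist follows the standard Numerical Recipes probks routine:

import math

def ksprob(a):
    # Q_KS(a) = 2 * sum_{j>=1} (-1)^(j-1) * exp(-2 j^2 a^2), summed until
    # successive terms become negligible (Numerical Recipes' probks)
    fac, total, termbf = 2.0, 0.0, 0.0
    a2 = -2.0 * a * a
    for j in range(1, 101):
        term = fac * math.exp(a2 * j * j)
        total += term
        if abs(term) <= 0.001 * termbf or abs(term) <= 1.0e-8 * total:
            return total
        fac = -fac
        termbf = abs(term)
    return 1.0  # series failed to converge; distributions are surely different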


@@ -1,12 +1,10 @@
 import unittest
 from tinygrad.tensor import Tensor
-from tinygrad.helpers import dtypes
+from tinygrad.helpers import CI, dtypes
 from tinygrad import Device
-import pytest
 # similar to test/external/external_test_gpu_ast.py, but universal
-pytestmark = pytest.mark.exclude_cuda
+@unittest.skipIf(Device.DEFAULT == "CUDA" and CI, "slow on CUDA CI")
 class TestSpecific(unittest.TestCase):
   # from openpilot
@@ -22,7 +20,7 @@ class TestSpecific(unittest.TestCase):
     w = Tensor.randn(2048, 512)
     (x @ w).reshape(1, 128, 4).contiguous().realize()
-  @unittest.skipIf(Device.DEFAULT in ["LLVM", "WEBGPU"], "Broken on LLVM and webgpu")
+  @unittest.skipIf(Device.DEFAULT in ["LLVM", "WEBGPU", "CUDA"], "Broken on LLVM, WEBGPU and CUDA")
   def test_big_vec_mul(self):
     # from LLaMA
     # 0 buffer<4096, dtypes.float> [View((1024, 1, 1, 4), (4, 0, 0, 1), 0, None)]
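The net effect of this file's change: the module-wide exclude_cuda mark becomes an explicit unittest.skipIf, so the skip condition travels with the test instead of depending on the CI's -m flag. A self-contained sketch of the pattern, using env-var stand-ins for tinygrad's Device.DEFAULT and CI helpers:

import os, unittest

CI = os.getenv("CI", "") != ""           # rough stand-in for tinygrad.helpers.CI
DEVICE = os.getenv("DEVICE", "CPU")      # stand-in for Device.DEFAULT (illustration only)

@unittest.skipIf(DEVICE == "CUDA" and CI, "slow on CUDA CI")
class TestSpecific(unittest.TestCase):
    # the condition is evaluated once at import time, so the skip applies
    # under any runner, not just a CI invocation with -m filters
    def test_small(self):
        self.assertEqual(2 + 2, 4)

if __name__ == "__main__":
    unittest.main()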