diff --git a/ruff.toml b/ruff.toml
index 7f860b8b..a856b504 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -26,6 +26,7 @@ lint.select = [
   "C416", # unnecessary-comprehension
   "RET506", # superfluous-else-raise
   "RET507", # superfluous-else-continue
+  "A", # builtin-variable-shadowing, builtin-argument-shadowing, builtin-attribute-shadowing
 ]
 
 line-length = 150
diff --git a/test/external/external_test_opt.py b/test/external/external_test_opt.py
index c81b100b..d58968f2 100644
--- a/test/external/external_test_opt.py
+++ b/test/external/external_test_opt.py
@@ -25,7 +25,7 @@ class CLCache:
     capturing.append(self)
     print("cache: entering")
     return self
-  def __exit__(self, type, value, traceback):
+  def __exit__(self, _type, value, traceback):
     capturing.clear()
     print(f"cache: exiting with size {self.count}", f"allowed {self.allowed}" if self.allowed is not None else "")
     if self.allowed is not None:
diff --git a/test/external/external_test_whisper_librispeech.py b/test/external/external_test_whisper_librispeech.py
index 32c891ba..264bb3f0 100644
--- a/test/external/external_test_whisper_librispeech.py
+++ b/test/external/external_test_whisper_librispeech.py
@@ -51,12 +51,12 @@ def run_evaluation(model_name, tinygrad_expected_wer, reference_wer):
 
 class LibriSpeech(torch.utils.data.Dataset):
   def __init__(self):
-    dir = pathlib.Path(__file__).parent.parent.parent / "extra" / "datasets" / "librispeech"
-    if not os.path.exists(dir):
-      os.makedirs(dir)
+    folder = pathlib.Path(__file__).parent.parent.parent / "extra" / "datasets" / "librispeech"
+    if not os.path.exists(folder):
+      os.makedirs(folder)
     self.dataset = torchaudio.datasets.LIBRISPEECH(
-      root=dir,
+      root=folder,
       url="test-clean",
       download=True,
     )
 
diff --git a/test/test_nn.py b/test/test_nn.py
index 81a20c35..f4a7b11f 100755
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -16,14 +16,14 @@ class TestNN(unittest.TestCase):
   @unittest.skipIf(Device.DEFAULT == "WEBGPU", "no int64 on WebGPU")
   def test_sparse_cat_cross_entropy(self):
     # create in tinygrad
-    input = Tensor.randn(5, 5)
+    input_tensor = Tensor.randn(5, 5)
     target = Tensor([0, 0, 0, 1, 2])
 
     # torch doesn't support target=-1
-    torch_input = torch.tensor(input.numpy())
+    torch_input = torch.tensor(input_tensor.numpy())
     torch_target = torch.tensor(target.numpy(), dtype=torch.long)
     for smoothing in [0.0, 0.1, 0.5, 1.0]:
       for ignore_index in [-1, 0, 2]:
-        loss = input.sparse_categorical_crossentropy(target, label_smoothing=smoothing, ignore_index=ignore_index)
+        loss = input_tensor.sparse_categorical_crossentropy(target, label_smoothing=smoothing, ignore_index=ignore_index)
         torch_loss = torch.nn.CrossEntropyLoss(reduction='mean', label_smoothing=smoothing, ignore_index=ignore_index)(torch_input, torch_target)
         np.testing.assert_allclose(loss.numpy(), torch_loss.detach().numpy(), atol=1e-5, rtol=1e-6)
diff --git a/test/test_uops.py b/test/test_uops.py
index b5d9d5ba..d06c66e8 100644
--- a/test/test_uops.py
+++ b/test/test_uops.py
@@ -12,10 +12,10 @@ from tinygrad.engine.realize import CompiledRunner, lower_schedule_item
 from tinygrad.codegen.uops import UOps, UOp, UOpGraph
 from test.helpers import is_dtype_supported
 
-def _uops_to_prg(uops_list, print=False):
+def _uops_to_prg(uops_list, print_uops=False):
   uops = UOpGraph(uops_list)
   src = Device[Device.DEFAULT].renderer.render("test", uops)
-  if print: uops.print()
+  if print_uops: uops.print()
   has_local = Device[Device.DEFAULT].renderer.has_local
   return CompiledRunner(Program("test", src, Device.DEFAULT, [1,1,1] if has_local else None, [1,1,1] if has_local else None, uops=uops))
 
@@ -59,7 +59,7 @@ def _test_uops_result(output_dtype, uops, res):
   # res = output_fn(uops)
   out = uop(uops, UOps.STORE, None, (buf_store, uop(uops, UOps.CONST, dtypes.int32, (), 0), res))
   buf = Buffer(Device.DEFAULT, 1, output_dtype).allocate()
-  prg = _uops_to_prg([out], print=True)
+  prg = _uops_to_prg([out], print_uops=True)
   prg.exec([buf])
   ret = np.empty(1, _to_np_dtype(output_dtype))
   buf.copyout(ret.data)
diff --git a/test/testextra/test_export_model.py b/test/testextra/test_export_model.py
index 4d0671c3..a41f16a2 100644
--- a/test/testextra/test_export_model.py
+++ b/test/testextra/test_export_model.py
@@ -31,9 +31,9 @@ class TextModelExport(unittest.TestCase):
 
   def test_multi_output_model_export(self):
     model = MockMultiOutputModel()
-    input = Tensor.rand(2,2)
-    outputs = model(input)
-    prg, _, out_sizes, _ = export_model(model, "", input)
+    input_tensor = Tensor.rand(2,2)
+    outputs = model(input_tensor)
+    prg, _, out_sizes, _ = export_model(model, "", input_tensor)
     prg = json.loads(prg)
 
     assert len(outputs) == len(prg["outputs"]) == len(out_sizes), f"Model and exported outputs don't match: mdl={len(outputs)}, prg={len(prg['outputs'])}, inp_sizes={len(out_sizes)}"  # noqa: E501
diff --git a/test/unit/test_shapetracker.py b/test/unit/test_shapetracker.py
index 8cdfd0bc..ed0b6ade 100644
--- a/test/unit/test_shapetracker.py
+++ b/test/unit/test_shapetracker.py
@@ -7,10 +7,10 @@ from tinygrad.shape.symbolic import Variable, NumNode
 from itertools import product
 
 def shapetracker_getitem(st, val):
-  locals = {"idx0": val, "valid": 1}
+  _locals = {"idx0": val, "valid": 1}
   idx, valid = st.reshape((st.size,)).expr_idxs()
-  exec(f"valid={valid.render()};idx0={idx.render()}", None, locals)
-  return locals["idx0"] if locals["valid"] else -1
+  exec(f"valid={valid.render()};idx0={idx.render()}", None, _locals)
+  return _locals["idx0"] if _locals["valid"] else -1
 
 class CheckingShapeTracker:
   def __init__(self, shape):
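For context, a minimal sketch (hypothetical names, not taken from this diff) of the failure mode the flake8-builtins ("A") rules catch: once a parameter shadows a builtin such as print, the builtin is unreachable inside that scope, which is why the names above are renamed rather than suppressed.

# Hypothetical example: A002 (builtin-argument-shadowing)
def dump(values, print=False):  # the parameter "print" shadows the builtin print()
  if print:
    print(values)               # TypeError: 'bool' object is not callable
  return len(values)

# The rename pattern used in the diff (print -> print_uops, input -> input_tensor, ...)
def dump_fixed(values, print_result=False):
  if print_result:
    print(values)               # the builtin print() is reachable again
  return len(values)

With "A" in lint.select, running ruff check flags the first definition at the offending parameter, so this class of bug is caught before the shadowed builtin is ever called.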