From 459488bba2ff838f2b0f1392561a30ee0b611ea6 Mon Sep 17 00:00:00 2001
From: George Hotz <72895+geohot@users.noreply.github.com>
Date: Thu, 2 Mar 2023 20:06:20 -0800
Subject: [PATCH] fix linter (#630)

* fix linter

* no imports okay

* explicit bases

* disable in pylintrc
---
 .github/workflows/test.yml    | 2 +-
 .pylintrc                     | 6 ++++--
 tinygrad/__init__.py          | 1 -
 tinygrad/codegen/llvm.py      | 2 +-
 tinygrad/runtime/ops_cpu.py   | 8 ++++----
 tinygrad/runtime/ops_cuda.py  | 4 ++--
 tinygrad/runtime/ops_gpu.py   | 2 +-
 tinygrad/runtime/ops_torch.py | 4 ++--
 8 files changed, 15 insertions(+), 14 deletions(-)
 delete mode 100644 tinygrad/__init__.py

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 4bd00d2e..942580ad 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -40,7 +40,7 @@ jobs:
     - name: Lint tinygrad with pylint
       run: pylint tinygrad/
     - name: Run mypy
-      run: mypy tinygrad/ --ignore-missing-imports --check-untyped-defs --warn-unreachable
+      run: mypy tinygrad/ --ignore-missing-imports --check-untyped-defs --explicit-package-bases --warn-unreachable

  testcpu:
    name: CPU Tests
diff --git a/.pylintrc b/.pylintrc
index 47baaa97..93dff0b9 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -54,9 +54,11 @@ confidence=
 # --enable=similarities". If you want to run only the classes checker, but have
 # no Warning level messages displayed, use"--disable=all --enable=classes
 # --disable=W"
-disable=C,R,W0613,W0511,W0212,W0201,W0106,W0603,W0621,W0703,W1201,W1203,E1136,W1514,E1101,W0221,W0632
+disable=C,R,W0613,W0511,W0212,W0201,W0106,W0603,W0621,W0703,W1201,W1203,E1136,W1514,E1101,W0221,W0105,E0401
 # E1101 for mlops binding
-# W0221,W0632 for Function class
+# W0221 for Function class
+# W0105 for comment strings
+# E0401 for missing imports

 # Enable the message, report, category or checker with the given id(s). You can
 # either give multiple identifier separated by comma (,) or put this option
diff --git a/tinygrad/__init__.py b/tinygrad/__init__.py
deleted file mode 100644
index e49d8ccd..00000000
--- a/tinygrad/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from tinygrad import tensor, nn  # noqa: F401
diff --git a/tinygrad/codegen/llvm.py b/tinygrad/codegen/llvm.py
index 2bbabc2d..e1e55312 100644
--- a/tinygrad/codegen/llvm.py
+++ b/tinygrad/codegen/llvm.py
@@ -151,7 +151,7 @@ class LLVMCodegen(ASTKernel):
       return m
     if isinstance(x.op, ReduceOps):
       if reduce_result is None:
-        raise Exception("no reduce")
+        raise RuntimeError("no reduce")
       return reduce_result
     values = [ast_parse(builder, v, level, reduce_result) for v in x.src]
diff --git a/tinygrad/runtime/ops_cpu.py b/tinygrad/runtime/ops_cpu.py
index ef26550c..0f50b369 100644
--- a/tinygrad/runtime/ops_cpu.py
+++ b/tinygrad/runtime/ops_cpu.py
@@ -23,10 +23,10 @@ def einsum_mulacc(einsum, get_strides, expand):
   return mulacc

 numpy_fxn_for_op : Dict[Op, Callable] = {**base_fxn_for_op, **{
-  UnaryOps.NOOP: lambda x: np.ascontiguousarray(x), UnaryOps.EXP: lambda x: np.exp(x), UnaryOps.LOG: lambda x: np.log(x),
+  UnaryOps.NOOP: np.ascontiguousarray, UnaryOps.EXP: np.exp, UnaryOps.LOG: np.log,
   BinaryOps.MAX: np.maximum, BinaryOps.CMPEQ: lambda x,y: (x==y).astype(np.float32),
-  MovementOps.FLIP: lambda x, axis: np.flip(x, axis), MovementOps.PERMUTE: lambda x, order: x.transpose(order),
-  MovementOps.PAD: lambda x, padding: np.pad(x, padding), MovementOps.EXPAND: lambda x, new_shape: np.broadcast_to(x, new_shape),
+  MovementOps.FLIP: np.flip, MovementOps.PERMUTE: lambda x, order: x.transpose(order),
+  MovementOps.PAD: np.pad, MovementOps.EXPAND: np.broadcast_to,
   FusedOps.MULACC: einsum_mulacc(lambda s,a,b: np.einsum(s, a.copy(), b.copy()), lambda x: x.strides, np.broadcast_to)
 }}
@@ -35,4 +35,4 @@ class CPUBuffer(InterpretedBuffer):

   @staticmethod
   def fromCPU(x): return CPUBuffer(x)
-  def toCPU(x): return x._buf
+  def toCPU(self): return self._buf
diff --git a/tinygrad/runtime/ops_cuda.py b/tinygrad/runtime/ops_cuda.py
index 6b034bc2..927492b2 100644
--- a/tinygrad/runtime/ops_cuda.py
+++ b/tinygrad/runtime/ops_cuda.py
@@ -1,7 +1,7 @@
 from typing import Optional
 import pycuda.autoprimaryctx # type: ignore # pylint: disable=unused-import # noqa: F401
 import pycuda.driver as cuda # type: ignore
-from pycuda.compiler import compile # type: ignore
+from pycuda.compiler import compile as cuda_compile # type: ignore
 import numpy as np
 from tinygrad.helpers import DEBUG
 from tinygrad.ops import CompiledBuffer, RawBufferCopyInOut
@@ -14,7 +14,7 @@ class RawCUDABuffer(RawBufferCopyInOut):

 class CUDAProgram:
   def __init__(self, name:str, prg:str, binary=False):
-    if not binary: prg = compile(prg, target="ptx", no_extern_c=True).decode('utf-8')
+    if not binary: prg = cuda_compile(prg, target="ptx", no_extern_c=True).decode('utf-8')
     if DEBUG >= 5: print(prg)
     # TODO: name is wrong
     self.prg = cuda.module_from_buffer(prg.encode('utf-8')).get_function(prg.split(".visible .entry ")[1].split("(")[0])
diff --git a/tinygrad/runtime/ops_gpu.py b/tinygrad/runtime/ops_gpu.py
index 9fd1d954..e8a7be95 100644
--- a/tinygrad/runtime/ops_gpu.py
+++ b/tinygrad/runtime/ops_gpu.py
@@ -46,7 +46,7 @@ class CLBuffer(RawBufferCopyInOut):
   def copyin(self, x:np.ndarray): cl.enqueue_copy(CL.cl_queue, self._cl, x, is_blocking=False)
   def copyout(self, x:np.ndarray): cl.enqueue_copy(CL.cl_queue, x, self._cl, is_blocking=True)

-class CLImage(RawBuffer):
+class CLImage(RawBuffer):  # pylint: disable=abstract-method
   fmt : Final = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.HALF_FLOAT if FLOAT16 else cl.channel_type.FLOAT)
   IMAGE : Final = True
diff --git a/tinygrad/runtime/ops_torch.py b/tinygrad/runtime/ops_torch.py
index 7623b0d5..c6e0a208 100644
--- a/tinygrad/runtime/ops_torch.py
+++ b/tinygrad/runtime/ops_torch.py
@@ -16,5 +16,5 @@ class TorchBuffer(InterpretedBuffer):
   fxn_for_op : ClassVar = torch_fxn_for_op

   @staticmethod
-  def fromCPU(data): return TorchBuffer(torch.from_numpy(data).requires_grad_(False).to(device))
-  def toCPU(x): return x._buf.cpu().numpy()
+  def fromCPU(x): return TorchBuffer(torch.from_numpy(x).requires_grad_(False).to(device))
+  def toCPU(self): return self._buf.cpu().numpy()
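
A note on the ops_cpu.py hunk above: the lambdas removed there only forwarded their arguments to the underlying NumPy functions, so the dispatch table can hold the NumPy callables directly with identical behavior. A minimal sketch of that equivalence, using plain string keys instead of tinygrad's Op enums (not part of the patch):

  import numpy as np

  # wrapper form: each lambda only forwards its argument to the NumPy function
  fxn_wrapped = {"NOOP": lambda x: np.ascontiguousarray(x), "EXP": lambda x: np.exp(x), "LOG": lambda x: np.log(x)}

  # direct form, as the patch uses: the table holds the NumPy callables themselves
  fxn_direct = {"NOOP": np.ascontiguousarray, "EXP": np.exp, "LOG": np.log}

  x = np.array([0.5, 1.0, 2.0])
  for op in ("NOOP", "EXP", "LOG"):
    assert np.allclose(fxn_wrapped[op](x), fxn_direct[op](x))

The same idea applies to the FLIP, PAD, and EXPAND entries, where the extra argument is still passed positionally to np.flip, np.pad, and np.broadcast_to.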