mirror of https://github.com/commaai/tinygrad.git
fix linter (#630)
* fix linter
* no imports okay
* explicit bases
* disable in pylintrc
parent 3915c89fb6
commit 459488bba2
@@ -40,7 +40,7 @@ jobs:
     - name: Lint tinygrad with pylint
       run: pylint tinygrad/
     - name: Run mypy
-      run: mypy tinygrad/ --ignore-missing-imports --check-untyped-defs --warn-unreachable
+      run: mypy tinygrad/ --ignore-missing-imports --check-untyped-defs --explicit-package-bases --warn-unreachable
 
   testcpu:
     name: CPU Tests
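The added --explicit-package-bases flag ("explicit bases" in the commit message) tells mypy to derive module names from the directory layout rather than from importable package roots, which is presumably needed once the re-exporting import below is dropped. To reproduce the CI type check locally, a minimal sketch (assumes mypy is installed in the current environment):

import subprocess

# a sketch of running the same type check locally; flags mirror the workflow step above
subprocess.run(
  ["mypy", "tinygrad/", "--ignore-missing-imports", "--check-untyped-defs",
   "--explicit-package-bases", "--warn-unreachable"],
  check=True,
)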
@@ -54,9 +54,11 @@ confidence=
 # --enable=similarities". If you want to run only the classes checker, but have
 # no Warning level messages displayed, use "--disable=all --enable=classes
 # --disable=W"
-disable=C,R,W0613,W0511,W0212,W0201,W0106,W0603,W0621,W0703,W1201,W1203,E1136,W1514,E1101,W0221,W0632
+disable=C,R,W0613,W0511,W0212,W0201,W0106,W0603,W0621,W0703,W1201,W1203,E1136,W1514,E1101,W0221,W0105,E0401
 # E1101 for mlops binding
-# W0221,W0632 for Function class
+# W0221 for Function class
+# W0105 for comment strings
+# E0401 for missing imports
 
 # Enable the message, report, category or checker with the given id(s). You can
 # either give multiple identifier separated by comma (,) or put this option
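For reference, W0105 (pointless-string-statement) fires on bare string literals used as block comments, and E0401 (import-error) fires when pylint cannot resolve an import, e.g. an optional backend not installed in the lint environment. A minimal sketch with hypothetical names of code that would trip both without the new disables:

x = 1
"strings placed after code as explanatory comments trigger W0105 (pointless-string-statement)"

try:
  import hypothetical_gpu_backend  # not installed in the lint venv, so pylint reports E0401 (import-error)
except ImportError:
  hypothetical_gpu_backend = None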
@@ -1 +0,0 @@
-from tinygrad import tensor, nn # noqa: F401
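With this package-level re-export removed ("no imports okay" in the commit message), callers import the submodules they need directly. A minimal usage sketch, assuming the usual tinygrad layout:

from tinygrad.tensor import Tensor  # import the submodule directly rather than via the removed re-export

t = Tensor([1.0, 2.0, 3.0]) * 2
print(t.numpy())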
@@ -151,7 +151,7 @@ class LLVMCodegen(ASTKernel):
       return m
     if isinstance(x.op, ReduceOps):
       if reduce_result is None:
-        raise Exception("no reduce")
+        raise RuntimeError("no reduce")
       return reduce_result
     values = [ast_parse(builder, v, level, reduce_result) for v in x.src]
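Raising a specific exception type instead of a bare Exception keeps pylint's broad-exception warning quiet and lets callers catch the failure precisely. A minimal sketch of the pattern, using a hypothetical helper name:

def require_reduce(reduce_result):
  # mirrors the change above: fail with a specific, catchable type
  if reduce_result is None:
    raise RuntimeError("no reduce")
  return reduce_result

try:
  require_reduce(None)
except RuntimeError as e:
  print("caught:", e)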
@@ -23,10 +23,10 @@ def einsum_mulacc(einsum, get_strides, expand):
   return mulacc
 
 numpy_fxn_for_op : Dict[Op, Callable] = {**base_fxn_for_op, **{
-  UnaryOps.NOOP: lambda x: np.ascontiguousarray(x), UnaryOps.EXP: lambda x: np.exp(x), UnaryOps.LOG: lambda x: np.log(x),
+  UnaryOps.NOOP: np.ascontiguousarray, UnaryOps.EXP: np.exp, UnaryOps.LOG: np.log,
   BinaryOps.MAX: np.maximum, BinaryOps.CMPEQ: lambda x,y: (x==y).astype(np.float32),
-  MovementOps.FLIP: lambda x, axis: np.flip(x, axis), MovementOps.PERMUTE: lambda x, order: x.transpose(order),
-  MovementOps.PAD: lambda x, padding: np.pad(x, padding), MovementOps.EXPAND: lambda x, new_shape: np.broadcast_to(x, new_shape),
+  MovementOps.FLIP: np.flip, MovementOps.PERMUTE: lambda x, order: x.transpose(order),
+  MovementOps.PAD: np.pad, MovementOps.EXPAND: np.broadcast_to,
   FusedOps.MULACC: einsum_mulacc(lambda s,a,b: np.einsum(s, a.copy(), b.copy()), lambda x: x.strides, np.broadcast_to)
 }}
 
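Passing the NumPy functions directly is behaviorally equivalent to the one-argument lambdas they replace, since the dispatch table calls each entry positionally; the change just drops a layer of indirection. A quick sketch checking the equivalence:

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)

# the direct function reference and the old lambda wrapper give identical results
assert np.array_equal(np.flip(x, 0), (lambda a, axis: np.flip(a, axis))(x, 0))
assert np.array_equal(np.pad(x, ((1, 1), (0, 0))), (lambda a, padding: np.pad(a, padding))(x, ((1, 1), (0, 0))))
assert np.array_equal(np.broadcast_to(x, (4, 2, 3)), (lambda a, s: np.broadcast_to(a, s))(x, (4, 2, 3)))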
@@ -35,4 +35,4 @@ class CPUBuffer(InterpretedBuffer):
 
   @staticmethod
   def fromCPU(x): return CPUBuffer(x)
-  def toCPU(x): return x._buf
+  def toCPU(self): return self._buf
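Renaming the first parameter of toCPU to self addresses pylint's no-self-argument check (E0213) without changing behavior: the instance is passed as the first positional argument either way. A minimal sketch with a hypothetical class:

class Buf:
  def __init__(self, buf): self._buf = buf
  def toCPU(self): return self._buf  # the first positional argument is the instance, so naming it self is correct

print(Buf([1, 2, 3]).toCPU())  # -> [1, 2, 3]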
@@ -1,7 +1,7 @@
 from typing import Optional
 import pycuda.autoprimaryctx # type: ignore # pylint: disable=unused-import # noqa: F401
 import pycuda.driver as cuda # type: ignore
-from pycuda.compiler import compile # type: ignore
+from pycuda.compiler import compile as cuda_compile # type: ignore
 import numpy as np
 from tinygrad.helpers import DEBUG
 from tinygrad.ops import CompiledBuffer, RawBufferCopyInOut
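Importing pycuda's compile under the alias cuda_compile avoids shadowing Python's builtin compile(), which pylint reports as redefined-builtin (W0622). A minimal sketch of the aliased call, with arguments mirroring the hunk below; it requires pycuda and a CUDA toolchain, and the kernel source is a hypothetical placeholder:

from pycuda.compiler import compile as cuda_compile  # type: ignore  # alias keeps the builtin compile() visible

ptx = cuda_compile("__global__ void noop() {}", target="ptx", no_extern_c=True).decode('utf-8')
print(ptx.splitlines()[0])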
@@ -14,7 +14,7 @@ class RawCUDABuffer(RawBufferCopyInOut):
 
 class CUDAProgram:
   def __init__(self, name:str, prg:str, binary=False):
-    if not binary: prg = compile(prg, target="ptx", no_extern_c=True).decode('utf-8')
+    if not binary: prg = cuda_compile(prg, target="ptx", no_extern_c=True).decode('utf-8')
     if DEBUG >= 5: print(prg)
     # TODO: name is wrong
     self.prg = cuda.module_from_buffer(prg.encode('utf-8')).get_function(prg.split(".visible .entry ")[1].split("(")[0])
@@ -46,7 +46,7 @@ class CLBuffer(RawBufferCopyInOut):
   def copyin(self, x:np.ndarray): cl.enqueue_copy(CL.cl_queue, self._cl, x, is_blocking=False)
   def copyout(self, x:np.ndarray): cl.enqueue_copy(CL.cl_queue, x, self._cl, is_blocking=True)
 
-class CLImage(RawBuffer):
+class CLImage(RawBuffer): # pylint: disable=abstract-method
   fmt : Final = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.HALF_FLOAT if FLOAT16 else cl.channel_type.FLOAT)
   IMAGE : Final = True
 
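The inline # pylint: disable=abstract-method scopes the suppression to CLImage alone instead of switching the check off repo-wide in the pylintrc. A minimal sketch of the pattern with hypothetical names:

from abc import ABC, abstractmethod

class RawBase(ABC):
  @abstractmethod
  def toCPU(self): ...

class ImageOnly(RawBase):  # pylint: disable=abstract-method
  # intentionally leaves toCPU unimplemented; the disable applies only to this class
  IMAGE = True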
@@ -16,5 +16,5 @@ class TorchBuffer(InterpretedBuffer):
   fxn_for_op : ClassVar = torch_fxn_for_op
 
   @staticmethod
-  def fromCPU(data): return TorchBuffer(torch.from_numpy(data).requires_grad_(False).to(device))
-  def toCPU(x): return x._buf.cpu().numpy()
+  def fromCPU(x): return TorchBuffer(torch.from_numpy(x).requires_grad_(False).to(device))
+  def toCPU(self): return self._buf.cpu().numpy()