mirror of https://github.com/commaai/tinygrad.git
make self referential AST fast too (#2278)
* cleanup
* linter
* linter
* linter
* rm .buffers
* linter
* linter
* huh?
* cleanup
* typo
* min diff
* property
* rev
* linter
* no matel hack
* minimal properties
* line
* checkout master
* copy_to_device
* idk
* revert
* type
* type
* faast
* speed test
* cleanup test
* softer test
* monotonic
* harder test
* clean code
* cleanup
This commit is contained in:
parent 4f7b1ac0d2
commit cff8375aa2
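What "self referential AST" means here: a LazyOp's src can contain the same node more than once, so the AST is really a DAG, and a naive tree walk revisits each shared node once per path into it, which is exponential in depth. A minimal standalone sketch of the failure mode and the fix (plain Python, not tinygrad code):

import time

def make_dag(depth):
  node = ("leaf",)
  for _ in range(depth):
    node = ("add", node, node)  # both children are the SAME object, so this is a DAG
  return node

def count_leaves(node, cache=None):
  # naive tree walk when cache is None; one visit per distinct node otherwise
  if cache is not None and id(node) in cache: return cache[id(node)]
  n = 1 if node[0] == "leaf" else sum(count_leaves(c, cache) for c in node[1:])
  if cache is not None: cache[id(node)] = n
  return n

dag = make_dag(20)
st = time.monotonic(); count_leaves(dag); naive = time.monotonic() - st
st = time.monotonic(); count_leaves(dag, cache={}); cached = time.monotonic() - st
print(f"naive: {naive:.3f}s (~2**20 visits)  cached: {cached:.6f}s (21 visits)")

Caching derived values per node, which is what the functools.cached_property change below does, is exactly the memoized variant.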
@@ -3,10 +3,13 @@ from tinygrad.tensor import Tensor
 # stuff needed to unpack a kernel
 from tinygrad.ops import LazyOp, TernaryOps, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer
+from tinygrad.lazy import LazyBuffer
 from tinygrad.helpers import dtypes
 from tinygrad.shape.shapetracker import ShapeTracker
 from tinygrad.shape.view import View
 from tinygrad.shape.symbolic import Variable
+import numpy as np
+import time
 inf, nan = float('inf'), float('nan')

 class TestLazyOp(unittest.TestCase):
@@ -17,5 +20,13 @@ class TestLazyOp(unittest.TestCase):
     ast_remade = eval(str(ast))
     self.assertEqual(ast, ast_remade)

+  def test_selfreferential_speed(self):
+    st = time.monotonic()
+    for i in range(25):
+      p = LazyBuffer.fromCPU(np.array([1]))
+      for _ in range(i): p = p.e(BinaryOps.ADD, p)
+      # sanity check: if caching works, this should be way faster
+      assert time.monotonic() - st < 0.5, f"{i}"
+
 if __name__ == '__main__':
   unittest.main()
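Two details in this new test are easy to miss: st is set once, before the outer loop, so the 0.5 s assertion bounds the cumulative time over all 25 DAG depths, and each p = p.e(BinaryOps.ADD, p) step makes the new node reference the previous one twice, so after i steps the AST has 2**i paths to the root buffer but only i+1 distinct nodes. Without per-node caching, walking it is O(2**i); with the cached properties below, O(i). To run just this test (the path test/test_lazyop.py is an assumption based on the tinygrad repo layout, not shown in this diff):

python -m pytest test/test_lazyop.py -k test_selfreferential_speed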
@@ -2,7 +2,7 @@ from __future__ import annotations
 import importlib, inspect, functools, pathlib, re
 from enum import Enum, auto
 from typing import TYPE_CHECKING, Union, Type, Tuple, Any, List, Optional, Dict, Callable, Mapping, cast
-from tinygrad.helpers import ansilen, prod, DEBUG, getenv, GlobalCounters, DType, colored, BEAM, NOOPT, all_int
+from tinygrad.helpers import ansilen, prod, DEBUG, getenv, GlobalCounters, DType, colored, BEAM, NOOPT, dedup, all_int
 from tinygrad.runtime.lib import RawBuffer
 from tinygrad.shape.symbolic import Variable, sym_infer, NumNode
 from dataclasses import dataclass
@@ -52,13 +52,11 @@ class LazyOp:
   src: Tuple[Union[LazyOp, LazyBuffer], ...]
   arg: Any = None
   def __repr__(self): return f"LazyOp(op={self.op}, src={self.src}, arg={self.arg})"
-  @property
-  def buffers(self):
-    buffers: Tuple[Union[LazyOp, LazyBuffer], ...] = ()
-    try: # NOTE: the linearizer's key function maps the buffers to ints, and LOCAL_BUFFER is used. we don't care about buffers in these cases
-      for x in self.src: buffers += x.buffers
-    except AttributeError: buffers = ()
-    return buffers
+  @functools.cached_property
+  def buffers(self) -> Tuple[LazyBuffer, ...]: return tuple(dedup(sum([x.buffers for x in self.src], ())))
+  @functools.cached_property
+  def hash(self): return hash((self.op, self.src, self.arg))
+  def __hash__(self): return self.hash

   @property
   def key(self): return (self.op, tuple(map(lambda x: getattr(x, "key", x), self.src)), getattr(self.arg, "key", self.arg))
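For reference, the memoization pattern this hunk applies, reduced to a self-contained sketch (a hypothetical Node class, not tinygrad's actual code). functools.cached_property stores the computed value directly into the instance's __dict__, bypassing __setattr__, which is why the pattern works even on a frozen dataclass; and dedup-ing at every level keeps the merged tuples small when children are shared:

import functools
from dataclasses import dataclass
from typing import Any, Tuple

def dedup(x): return list(dict.fromkeys(x))  # order-preserving dedup, like tinygrad.helpers.dedup

@dataclass(frozen=True, eq=False)  # eq=False keeps identity-based equality, so dedup compares by object
class Node:
  src: Tuple["Node", ...] = ()
  arg: Any = None

  @functools.cached_property
  def buffers(self) -> Tuple["Node", ...]:
    # leaves report themselves; interior nodes merge their children's (already
    # deduped, already cached) results, so each distinct node is computed once
    return (self,) if not self.src else tuple(dedup(sum([x.buffers for x in self.src], ())))

  @functools.cached_property
  def _hash(self): return hash((self.src, self.arg))
  def __hash__(self): return self._hash

n = Node(arg=1)
for _ in range(200): n = Node(src=(n, n))  # 2**200 paths to the leaf, 201 distinct nodes
print(len(n.buffers))  # 1, computed in O(depth) thanks to the caches

Caching the hash matters for the same reason as caching buffers: hash((self.op, self.src, self.arg)) recurses into the children via their __hash__, so without memoization it has the same exponential blowup on a self-referential AST.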