should fix tests

George Hotz 2022-07-03 16:06:11 -07:00
parent 71a812fbf2
commit e6e43e820e
3 changed files with 8 additions and 4 deletions

View File

@@ -20,6 +20,8 @@ def run_onnx_torch(onnx_model, inputs):
OPENPILOT_MODEL = "https://github.com/commaai/openpilot/raw/7da48ebdba5e3cf4c0b8078c934bee9a199f0280/selfdrive/modeld/models/supercombo.onnx"
#OPENPILOT_MODEL = "https://github.com/commaai/openpilot/raw/1f2f9ea9c9dc37bdea9c6e32e4cb8f88ea0a34bf/selfdrive/modeld/models/supercombo.onnx"
+np.random.seed(1337)
class TestOnnxModel(unittest.TestCase):
def test_benchmark_openpilot_model(self):
dat = fetch(OPENPILOT_MODEL)
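
For context on the seed line: NumPy's global RNG drives whatever random inputs the benchmark constructs, so pinning it makes every run see identical data. A minimal sketch of the effect, using a made-up shape rather than the model's real inputs:

    import numpy as np

    np.random.seed(1337)              # fix the global RNG state, as the diff does
    a = np.random.randn(2, 3).astype(np.float32)
    np.random.seed(1337)              # re-seeding replays the same stream
    b = np.random.randn(2, 3).astype(np.float32)
    assert (a == b).all()             # identical inputs -> reproducible test results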

View File

@@ -17,7 +17,7 @@ class CPUBuffer(np.ndarray):
def flip(x, axis): return np.flip(x, axis)
def amax(x, *args, **kwargs): return np.amax(x, *args, **kwargs)
def permute(x, order): return x.transpose(order)
-def custompad(x, padding): return np.pad(x, padding).view(CPUBuffer) if any(x > 0 or y > 0 for x,y in padding) else x
+def custompad(x, padding): return np.pad(x, padding).view(CPUBuffer) if any(x != 0 or y != 0 for x,y in padding) else x
def expand(x, new_shape): return np.broadcast_to(x, new_shape).view(CPUBuffer)
@staticmethod
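
On the custompad change: the shortcut that returns x untouched now fires only when every (before, after) pad amount is exactly zero; with the old "x > 0 or y > 0" test, a spec whose nonzero entries were all non-positive would also skip np.pad. A small check of the two predicates on a hypothetical padding spec (not one taken from the tests):

    padding = ((0, 0), (-1, 0))                          # nonzero but non-positive entry
    old = any(x > 0 or y > 0 for x, y in padding)        # False -> np.pad skipped
    new = any(x != 0 or y != 0 for x, y in padding)      # True  -> not treated as a no-op
    print(old, new)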
@@ -48,7 +48,7 @@ class CPUBuffer(np.ndarray):
def processing_op(x,op,w,C):
assert op == ProcessingOps.CONV, f"{op} isn't supported"
x = x.movement_op(MovementOps.SLICE, ((0, x.shape[0]), (0, x.shape[1]), (-C.py, x.shape[2]+C.py_), (-C.px, x.shape[3]+C.px_)))
-gx = x.reshape(C.bs,C.groups,C.cin,x.shape[2],x.shape[3])
+gx = x.ravel().reshape(C.bs,C.groups,C.cin,x.shape[2],x.shape[3])
tx = np.lib.stride_tricks.as_strided(gx,
shape=(C.bs, C.groups, C.cin, C.oy, C.ox, C.H, C.W),
strides=(*gx.strides[0:3], gx.strides[3]*C.sy, gx.strides[4]*C.sx, gx.strides[3]*C.dy, gx.strides[4]*C.dx),
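
On the ravel() change: ndarray.ravel() flattens and copies whenever the array is not already contiguous, so gx is guaranteed to be a dense C-contiguous buffer rather than a strided view aliasing other storage before as_strided derives the convolution-window strides from gx.strides; a bare reshape can legally hand back such a view. The commit message doesn't say which failure this cures, so treat the aliasing angle as an educated guess. A toy comparison (not conv data):

    import numpy as np

    a = np.arange(8)
    v = a[::2]                         # non-contiguous view of a
    r1 = v.reshape(2, 2)               # reshape may return a strided view...
    r2 = v.ravel().reshape(2, 2)       # ...ravel() forces a contiguous copy first
    print(np.shares_memory(a, r1))     # True: r1 still aliases a's storage
    print(np.shares_memory(a, r2))     # False: r2 owns a fresh dense buffer
    print(r1.strides, r2.strides)      # e.g. (32, 16) vs (16, 8) with 64-bit ints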

View File

@@ -1,6 +1,7 @@
from __future__ import annotations
from enum import Enum
from typing import Optional, Tuple, NamedTuple, Union, Any, List, Dict, Type
+from copy import copy
import sys, functools, operator
from tinygrad.helpers import ConvArgs
from tinygrad.shapetracker import ShapeTracker
@@ -156,7 +157,7 @@ class LazyBuffer:
@staticmethod
def fromCPU(x, device):
-return LazyBuffer(device, x.shape, LoadOps, LazyOp(LoadOps.FROMCPU, tuple(), x))
+return LazyBuffer(device, x.shape, LoadOps, LazyOp(LoadOps.FROMCPU, tuple(), x.copy()))
def toCPU(x):
return x.realize().toCPU()
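
On the fromCPU change: the LazyOp only holds a reference to the caller's NumPy array and realization happens later, so without a snapshot any in-place mutation the caller performs in the meantime would leak into the graph. A toy illustration of reference vs copy semantics (variable names invented, not from tinygrad):

    import numpy as np

    src = np.ones(3, dtype=np.float32)
    held_ref = src           # old behaviour: alias the caller's buffer
    held_copy = src.copy()   # new behaviour: snapshot the data at graph-build time
    src *= 2                 # caller keeps mutating its array
    print(held_ref)          # [2. 2. 2.] -- the aliased data changed underneath
    print(held_copy)         # [1. 1. 1.] -- the snapshot is stable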
@@ -169,8 +170,9 @@ class LazyBuffer:
def movement_op(x:LazyBuffer, op:MovementOps, arg) -> LazyBuffer:
# if a MovementOp is applied to a MovementOp, merge them and use one buffer
+# TODO: look into why that copy is needed
ret = LazyBuffer(x.device, ShapeTracker(x.st).movement_op(op, arg), MovementOps,
-LazyOp(op, (x.op if MERGE_MOVEMENT_OPS and x.optype == MovementOps and x.realized is None else x,), arg))
+LazyOp(op, (x.op if MERGE_MOVEMENT_OPS and x.optype == MovementOps and x.realized is None else x,), copy(arg)))
if REMOVE_MOVEMENT_NOPS and x.realized is None and ret.st.contiguous:
root = get_lazybuffers(ret.op)[0]
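
On copy(arg): per the TODO above, the exact reason wasn't pinned down; a shallow copy only matters if arg is a mutable object the caller still holds and later modifies, in which case the LazyOp stored in the graph would otherwise see that edit. A hypothetical illustration (this arg value is invented, not a real movement-op argument):

    from copy import copy

    arg = [(0, 4), (0, 4)]    # pretend the caller passes a mutable arg
    stored_ref = arg          # without copy(): the graph aliases the caller's object
    stored_copy = copy(arg)   # with copy(): a shallow copy is stored instead
    arg[0] = (1, 4)           # caller mutates its object afterwards
    print(stored_ref)         # [(1, 4), (0, 4)] -- aliased, changed under the graph
    print(stored_copy)        # [(0, 4), (0, 4)] -- stable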