numpy device + pickle it (#4120)

This commit is contained in:
George Hotz 2024-04-09 13:19:30 -07:00 committed by GitHub
parent 1ef9c50fd7
commit ae849d12d7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 22 additions and 19 deletions

View File

@@ -7,7 +7,7 @@ def multidevice_test(fxn):
exclude_devices = getenv("EXCLUDE_DEVICES", "").split(",")
def ret(self):
for device in Device._devices:
if device in ["DISK", "EXT", "FAKE"]: continue
if device in ["DISK", "NPY", "FAKE"]: continue
if not CI: print(device)
if device in exclude_devices:
if not CI: print(f"WARNING: {device} test is excluded")

View File

@@ -15,6 +15,12 @@ class TestPickle(unittest.TestCase):
t2:Tensor = pickle.loads(st)
np.testing.assert_equal(t.numpy(), t2.numpy())
def test_pickle_numpy(self):
  """Round-trip a numpy-backed Tensor through pickle and verify its contents survive."""
  original = Tensor(np.array([1,2,3,4.]))
  restored: Tensor = pickle.loads(pickle.dumps(original))
  np.testing.assert_equal(original.numpy(), restored.numpy())
@unittest.expectedFailure
def test_pickle_jit(self):
@TinyJit

View File

@@ -350,7 +350,7 @@ class TestSchedule(unittest.TestCase):
def test_double_from(self):
x = Tensor([1,2,3,4])
out = x.to('ext')
out = x.to('npy')
check_schedule(out, 0, filter_loadops=False)
def test_pow_const_tensor_simplified(self):

View File

@@ -29,6 +29,7 @@ class Buffer:
return self
def __reduce__(self):
buf = None
if self.device == "NPY": return self.__class__, (self.device, self.size, self.dtype, self._buf, self.options)
if hasattr(self, '_buf'):
buf = bytearray(self.nbytes)
self.copyout(memoryview(buf))

View File

@@ -1,12 +0,0 @@
from typing import Tuple, Any
from tinygrad.device import Compiled, Allocator
# the Any is an arbitrary object that's kept in scope with the memoryview
class ExtAllocator(Allocator):
  """Allocator for externally-owned buffers, stored as (memoryview, keepalive) pairs.

  The second tuple element is an arbitrary object kept in scope so the
  memoryview in the first element stays valid.
  """
  # NOTE: this doesn't work with allow_zero_copy, it's read only somehow
  #def as_buffer(self, src:Tuple[memoryview, Any]) -> memoryview: return src[0]
  def copyin(self, dest:Tuple[memoryview, Any], src:memoryview):
    # write src's bytes into the destination's backing memoryview
    dest[0][:] = src
  def copyout(self, dest:memoryview, src:Tuple[memoryview, Any]):
    # read the source's backing memoryview out into dest
    dest[:] = src[0]
class ExtDevice(Compiled):
  """Virtual device for external buffers: has an allocator but no compiler or runtime."""
  def __init__(self, device:str):
    super().__init__(device, ExtAllocator(), None, None)

View File

@@ -0,0 +1,9 @@
import numpy as np
from tinygrad.helpers import flat_mv
from tinygrad.device import Compiled, Allocator
class NpyAllocator(Allocator):
  """Allocator whose buffers are numpy ndarrays; supports copy-out only."""
  def copyout(self, dest:memoryview, src:np.ndarray):
    # force a C-contiguous view first so the raw bytes are linear in memory
    contiguous = np.require(src, requirements='C')
    dest[:] = flat_mv(contiguous.data)
class NpyDevice(Compiled):
  """Virtual device backing numpy-sourced buffers: allocator only, no compiler or runtime."""
  def __init__(self, device:str):
    super().__init__(device, NpyAllocator(), None, None)

View File

@@ -7,8 +7,7 @@ from collections import defaultdict
import numpy as np
from tinygrad.dtype import DType, dtypes, ImageDType, ConstType, least_upper_float, least_upper_dtype
from tinygrad.helpers import argfix, make_pair, flatten, prod, all_int, round_up, merge_dicts, fully_flatten, flat_mv, argsort
from tinygrad.helpers import IMAGE, DEBUG, WINO, THREEFRY
from tinygrad.helpers import argfix, make_pair, flatten, prod, all_int, round_up, merge_dicts, fully_flatten, argsort, IMAGE, DEBUG, WINO, THREEFRY
from tinygrad.lazy import LazyBuffer
from tinygrad.features.multi import MultiLazyBuffer
from tinygrad.ops import LoadOps
@@ -45,9 +44,9 @@ def _loadop(op, shape:Tuple[sint,...], dtype:DType, device:Union[str, Tuple[str,
return MultiLazyBuffer([LazyBuffer.loadop(op, shape, dtype, d, arg, src) for d in device], None)
def _fromcpu(x: np.ndarray) -> LazyBuffer:
ret = LazyBuffer.loadop(LoadOps.EMPTY, x.shape, dtypes.from_np(x.dtype), "EXT")
ret = LazyBuffer.loadop(LoadOps.EMPTY, x.shape, dtypes.from_np(x.dtype), "NPY")
# fake realize
ret.buffer.allocate((memoryview(bytearray()), None) if x.size == 0 else (flat_mv(np.require(x, requirements='C').data), x))
ret.buffer.allocate(x)
del ret.srcs
return ret
@@ -155,7 +154,7 @@ class Tensor:
def assign(self, x) -> Tensor:
# TODO: this is a hack for writing to DISK. remove with working assign
if isinstance(self.device, str) and self.device.startswith("DISK"):
if x.__class__ is not Tensor: x = Tensor(x, device="EXT", dtype=self.dtype)
if x.__class__ is not Tensor: x = Tensor(x, device="NPY", dtype=self.dtype)
self.contiguous().realize().lazydata.base.realized.copyin(x.numpy().data)
return self
if x.__class__ is not Tensor: x = Tensor(x, device=self.device, dtype=self.dtype)