mirror of https://github.com/commaai/tinygrad.git
parent 9f77fd6135
commit 24d688c184
@@ -1,32 +1,37 @@
 #!/usr/bin/env python
+import gc
 import unittest
 from tinygrad.tensor import Tensor, GPU
 
+def tensors_allocated():
+  return sum([isinstance(x, Tensor) for x in gc.get_objects()])
+
 class TestGC(unittest.TestCase):
   gpu = False
 
   def test_gc(self):
     a = Tensor.zeros(4,4, gpu=self.gpu)
     b = Tensor.zeros(4,4, gpu=self.gpu)
     (a*b).mean().backward()
-    assert(Tensor.allocated > 0)
+    assert(tensors_allocated() > 0)
     del a,b
-    assert(Tensor.allocated == 0)
+    assert(tensors_allocated() == 0)
 
   def test_gc_complex(self):
     a = Tensor.zeros(4,4, gpu=self.gpu)
     b = Tensor.zeros(4,4, gpu=self.gpu)
-    assert(Tensor.allocated == 2)
+    assert(tensors_allocated() == 2)
     (a*b).mean().backward()
-    assert(Tensor.allocated == 4)
+    assert(tensors_allocated() == 4)
     del b
-    assert(Tensor.allocated == 2)
+    assert(tensors_allocated() == 2)
     b = Tensor.zeros(4,4, gpu=self.gpu)
-    print(Tensor.allocated)
+    print(tensors_allocated())
     (a*b).mean().backward()
-    print(Tensor.allocated)
-    assert(Tensor.allocated == 4)
+    print(tensors_allocated())
+    assert(tensors_allocated() == 4)
     del b
-    assert(Tensor.allocated == 2)
+    assert(tensors_allocated() == 2)
 
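The test no longer relies on Tensor keeping its own allocation counter; it counts live Tensor objects by scanning what the garbage collector tracks. Below is a minimal, standalone sketch of that counting technique, using a made-up Widget class rather than tinygrad's Tensor; it assumes CPython's immediate refcount-based reclamation.

import gc

# Hypothetical stand-in class; the real test applies the same idea to Tensor.
class Widget:
  pass

def widgets_allocated():
  # gc.get_objects() lists every container object CPython's collector tracks,
  # so an isinstance() sum counts the Widget instances that are still alive.
  return sum(isinstance(x, Widget) for x in gc.get_objects())

a, b = Widget(), Widget()
assert widgets_allocated() == 2
del a, b  # CPython frees both immediately once their refcounts hit zero
assert widgets_allocated() == 0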
@@ -55,7 +55,6 @@ class GPUBuffer:
 class Tensor:
   did_float_warning = False
   default_gpu = False
-  allocated = 0
   ops_cpu, ops_gpu = {}, {}
 
   def __init__(self, data, gpu=None, requires_grad=True):
@@ -85,12 +84,6 @@ class Tensor:
     # internal variables used for autograd graph construction
     self._ctx = None
 
-    Tensor.allocated += 1
-
-  def __del__(self):
-    #print("cleanup", self.shape)
-    Tensor.allocated -= 1
-
   def __repr__(self):
     return f"Tensor {self.data!r} with grad {(self.grad.data if self.grad else None)!r}"
 
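For contrast, what the Tensor side removes is a hand-maintained class counter bumped in __init__ and decremented in __del__. Here is a hedged sketch of that pattern with a hypothetical Gadget class, not tinygrad code. The commit does not state its motivation, but a likely reason such a counter is fragile is that __del__ only runs when an instance is actually finalized, which can happen late for objects caught in reference cycles, whereas a gc.get_objects() scan reflects what is really still alive.

# Hypothetical Gadget class illustrating the removed bookkeeping pattern.
class Gadget:
  allocated = 0

  def __init__(self):
    Gadget.allocated += 1

  def __del__(self):
    # Runs only when the instance is finalized; objects trapped in reference
    # cycles may be finalized late, so the counter can lag the true live count.
    Gadget.allocated -= 1

g = Gadget()
assert Gadget.allocated == 1
del g
assert Gadget.allocated == 0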