win more lines for core library (#158)

...and sacrifice test speed
This commit is contained in:
Daulet 2020-12-08 22:18:45 +00:00 committed by GitHub
parent 9f77fd6135
commit 24d688c184
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 14 additions and 16 deletions

View File

@@ -1,32 +1,37 @@
#!/usr/bin/env python
import gc
import unittest
from tinygrad.tensor import Tensor, GPU
def tensors_allocated():
  """Return the number of live Tensor instances tracked by the garbage collector."""
  count = 0
  for obj in gc.get_objects():
    if isinstance(obj, Tensor):
      count += 1
  return count
class TestGC(unittest.TestCase):
  """Verify Tensors are garbage collected once no Python references remain.

  Liveness is measured by scanning gc-tracked objects via tensors_allocated()
  (the commit replaces the old manual Tensor.allocated counter with this).
  """
  # CPU by default; presumably a GPU-enabled subclass flips this — TODO confirm.
  gpu = False

  def test_gc(self):
    # A forward + backward pass leaves tensors alive (leaves plus gradients)...
    a = Tensor.zeros(4, 4, gpu=self.gpu)
    b = Tensor.zeros(4, 4, gpu=self.gpu)
    (a*b).mean().backward()
    self.assertGreater(tensors_allocated(), 0)
    # ...and dropping both leaves must release the entire autograd graph.
    del a, b
    self.assertEqual(tensors_allocated(), 0)

  def test_gc_complex(self):
    a = Tensor.zeros(4, 4, gpu=self.gpu)
    b = Tensor.zeros(4, 4, gpu=self.gpu)
    self.assertEqual(tensors_allocated(), 2)
    # backward() materializes a gradient for each leaf: 2 -> 4 live tensors.
    (a*b).mean().backward()
    self.assertEqual(tensors_allocated(), 4)
    # Deleting one leaf frees it together with its gradient.
    del b
    self.assertEqual(tensors_allocated(), 2)
    # Rebuilding b and re-running backward reaches the same steady state,
    # i.e. the previous graph did not leak.
    b = Tensor.zeros(4, 4, gpu=self.gpu)
    (a*b).mean().backward()
    self.assertEqual(tensors_allocated(), 4)
    del b
    self.assertEqual(tensors_allocated(), 2)

View File

@@ -55,7 +55,6 @@ class GPUBuffer:
class Tensor:
  # Presumably guards a one-time float-conversion warning — TODO confirm against __init__.
  did_float_warning = False
  # Class-wide default for whether new tensors are placed on the GPU.
  default_gpu = False
  # Running count of live Tensor objects (bumped in __init__, dropped in __del__).
  # NOTE(review): this diff removes the counter in favor of gc-based counting in tests.
  allocated = 0
  # Look like per-device op registries (name -> implementation) — verify against callers.
  ops_cpu, ops_gpu = {}, {}
def __init__(self, data, gpu=None, requires_grad=True):
@@ -85,12 +84,6 @@ class Tensor:
# internal variables used for autograd graph construction
self._ctx = None
Tensor.allocated += 1
def __del__(self):
  # Finalizer: keep the class-level allocation counter in sync with live instances.
  # NOTE(review): this diff removes the whole counter mechanism.
  #print("cleanup", self.shape)
  Tensor.allocated -= 1
def __repr__(self):
  # Debug representation: raw data plus the gradient's data (None if no grad yet).
  return f"Tensor {self.data!r} with grad {(self.grad.data if self.grad else None)!r}"