tinygrad/test/test_gc.py

#!/usr/bin/env python
import gc
import unittest
import numpy as np
from tinygrad.device import Buffer
from tinygrad.engine.realize import run_schedule
from tinygrad.tensor import Tensor
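
# count live tinygrad objects by walking Python's GC-tracked heap; a cheap,
# backend-agnostic way for the tests below to detect reference leaks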
def tensors_allocated():
  return sum([isinstance(x, Tensor) for x in gc.get_objects()])

def bufs_allocated():
  return sum([isinstance(x, Buffer) for x in gc.get_objects()])

class TestGC(unittest.TestCase):
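  # deleting the last references should free the tensors; only the two global
  # RNG-state tensors (Tensor._device_seeds, Tensor._device_rng_counters) survive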
  def test_gc(self):
    Tensor.manual_seed(0)
    a = Tensor.rand(4, 4, requires_grad=True)
    b = Tensor.zeros(4, 4, requires_grad=True)
    (a*b).mean().backward()
    assert (tensors_allocated() > 0)
    del a, b
    assert (tensors_allocated() == 2)  # one for Tensor._device_rng_counters, and one for Tensor._device_seeds
    Tensor.manual_seed(0)
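
  # the exact counts below depend on tinygrad internals (the two RNG-state
  # tensors plus the gradients created by backward); pinning them catches
  # accidental reference leaks when tensors are rebound and deleted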
  def test_gc_complex(self):
    Tensor.manual_seed(0)
    a = Tensor(np.zeros((4, 4), dtype=np.float32), requires_grad=True)
    b = Tensor.rand(4, 4, requires_grad=True)
    assert (tensors_allocated() == 5)
    (a*b).mean().backward()
    assert (tensors_allocated() == 6)
    del b
    assert (tensors_allocated() == 4)
    b = Tensor(np.zeros((4, 4), dtype=np.float32), requires_grad=True)
    print(tensors_allocated())
    (a*b).mean().backward()
    print(tensors_allocated())
    assert (tensors_allocated() == 6)
    del b
    assert (tensors_allocated() == 4)
    Tensor.manual_seed(0)
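
  # building a schedule without running it must not pin device buffers:
  # once the tensors are gone, every Buffer they allocated should be collectable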
  def test_schedule_gc(self):
    init = bufs_allocated()
    x = Tensor.ones(256).contiguous().realize()
    y = Tensor.ones(5, 5).contiguous()
    y.schedule()
    del x
    del y
    self.assertEqual(bufs_allocated()-init, 0)
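
  # the schedule keeps the realized input x alive until it runs; afterwards
  # only y's output buffer should remain, and deleting y frees that too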
  def test_schedule_gc_with_inputs(self):
    init = bufs_allocated()
    x = Tensor.ones(256).contiguous().realize()
    y = x+Tensor.ones(256).contiguous()
    ys = y.schedule()
    del x
    run_schedule(ys)
    np.testing.assert_equal(y.numpy(), np.full((256,), 2))
    self.assertEqual(bufs_allocated()-init, 1)
    del y
    self.assertEqual(bufs_allocated()-init, 0)

if __name__ == '__main__':
  unittest.main()