mirror of https://github.com/commaai/tinygrad.git

fix tests, test bn evaluate too

commit 643e8b0388
parent 2f17d151b3
@@ -24,9 +24,10 @@ def print_objects():
     bb = gc.get_referrers(tb)
     for b in bb:
       if b is not gpubuffers and b is not gpubuffers_orphaned:
-        print(tb, "\nreference", type(b), len(b), str(b)[0:150], "\n\n")
+        print(tb, "\nreference", type(b), len(b), str(b)[0:150])
         for x in gc.get_referrers(b):
-          print(str(x)[0:100])
+          print("double reference", str(x)[0:100])
+        print("\n")
     if cnt == 10:
       break
     cnt += 1
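For context on the print_objects change: it walks the garbage collector's referrer graph to see what is still holding a GPU buffer alive, one level deep and then one level further. A minimal self-contained sketch of the same referrer-walking idea (the Leaky class and the limit of 10 are illustrative assumptions, not from this commit):

import gc

class Leaky:
  pass

leaked = [Leaky() for _ in range(3)]  # stand-in for buffers that should have been freed

def print_referrers(objs, limit=10):
  cnt = 0
  for tb in objs:
    for b in gc.get_referrers(tb):
      if b is not objs:                    # skip the list we built ourselves
        print(tb, "\nreference", type(b), str(b)[0:150])
        for x in gc.get_referrers(b):      # one level deeper: who holds the holder
          print("double reference", str(x)[0:100])
        print("\n")
    if cnt == limit:
      break
    cnt += 1

print_referrers(leaked)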
@@ -40,7 +40,7 @@ class TinyConvNet:
   def parameters(self):
     return optim.get_parameters(self)
 
-  def forward(self, x):
+  def forward(self, x:Tensor):
     x = x.reshape(shape=(-1, 1, 28, 28)) # hacks
     x = self.bn1(x.conv2d(self.c1)).relu().max_pool2d()
     x = self.bn2(x.conv2d(self.c2)).relu().max_pool2d()
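The bn1/bn2 layers used in forward above come from the has_batchnorm flag on the class. A rough sketch of how that wiring typically looks in this test (the channel counts, the l1 layer, and the BatchNorm2D import path are assumptions, not taken from the hunk):

from tinygrad.tensor import Tensor
from tinygrad.nn import BatchNorm2D   # import path assumed

class TinyConvNet:
  def __init__(self, has_batchnorm=False):
    conv = 3
    inter_chan, out_chan = 8, 16       # small channel counts keep the test fast (assumed values)
    self.c1 = Tensor.uniform(inter_chan, 1, conv, conv)
    self.c2 = Tensor.uniform(out_chan, inter_chan, conv, conv)
    self.l1 = Tensor.uniform(out_chan*5*5, 10)
    if has_batchnorm:
      self.bn1, self.bn2 = BatchNorm2D(inter_chan), BatchNorm2D(out_chan)
    else:
      # identity stand-ins so forward() can call bn1/bn2 unconditionally
      self.bn1, self.bn2 = lambda x: x, lambda x: x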
@@ -99,7 +99,7 @@ class TestMNIST(unittest.TestCase):
     model = TinyConvNet(has_batchnorm=True)
     optimizer = optim.Adam(model.parameters(), lr=0.001)
     train(model, X_train, Y_train, optimizer, steps=100)
-    # TODO: batchnorm doesn't work!!!
+    assert evaluate(model, X_test, Y_test) > 0.7 # TODO: batchnorm doesn't work!!!
 
   def test_sgd(self):
     np.random.seed(1337)
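The new assertion depends on an evaluate helper that is outside this hunk. A plausible sketch of such a helper, assuming model.forward accepts a Tensor of flattened test images and that the output can be read back to numpy via .numpy():

import numpy as np
from tinygrad.tensor import Tensor

def evaluate(model, X_test, Y_test):
  # forward the whole test set, take the argmax class, and compare to the labels
  out = model.forward(Tensor(X_test.reshape((-1, 28*28)).astype(np.float32)))
  preds = np.argmax(out.numpy(), axis=1)   # .numpy() assumed as the way to read back results
  accuracy = (preds == Y_test).mean()
  print("test set accuracy is %f" % accuracy)
  return accuracy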
@@ -11,7 +11,6 @@ class Optimizer:
 
     self.params : List[Tensor] = [x for x in params if x.requires_grad]
     self.buffers : List[Tensor] = [x for x in params if not x.requires_grad] # buffers are still realized
-    self.realize()
 
   # TODO: this probably shouldn't change the gradients, just the ones used by the optimizer
   def clipnorm(self, amount=1):
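The params/buffers split above matters because only trainable tensors receive gradient updates, while buffers (for example batchnorm running statistics) only need to be realized alongside them. A rough SGD-style sketch of how a subclass would consume the two lists (the assign/detach/realize calls mirror the tinygrad style, but the exact signatures here are assumptions):

class SGD(Optimizer):
  def __init__(self, params, lr=0.001):
    super().__init__(params)
    self.lr = lr

  def step(self):
    # only trainable tensors get the gradient update
    for t in self.params:
      t.assign(t.detach() - t.grad * self.lr)
    # realize params and buffers together so the lazy ops are actually executed
    self.realize()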