Make main test file work
tostenzel committed Jan 2, 2024
1 parent 1e1d02e commit ef15da9
Showing 4 changed files with 54 additions and 15 deletions.
Empty file added tests/__init__.py
50 changes: 50 additions & 0 deletions tests/gradcheck.py
@@ -0,0 +1,50 @@
import numpy as np

from edugrad.tensor import Tensor


def mask_like(like, mask_inx, mask_value=1.0):
    # Array shaped like `like`: zero everywhere except at flat index
    # `mask_inx`, where it holds `mask_value`.
    mask = np.zeros_like(like).reshape(-1)
    mask[mask_inx] = mask_value
    return mask.reshape(like.shape)


def jacobian(func, input):
    # Analytic Jacobian of `func` at `input`, built row by row via backprop.
    output = func(input)

    ji = input.numpy().reshape(-1).shape[-1]
    jo = output.numpy().reshape(-1).shape[-1]
    J = np.zeros((jo, ji), dtype=np.float32)

    for o in range(jo):
        input.grad = None
        output = func(input)

        # tinygrad doesn't support slicing; tiny hack to select
        # the needed scalar and backpropagate only through it
        o_scalar = Tensor(mask_like(output.numpy(), o, 1.0)).mul(output).sum()
        o_scalar.backward()

        for i, grad in enumerate(input.grad.numpy().reshape(-1)):
            J[o, i] = grad
    return J


def numerical_jacobian(func, input, eps=1e-3):
    # Numerical Jacobian via central differences:
    # d out_o / d in_i ~= (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps)
    output = func(input)

    ji = input.numpy().reshape(-1).shape[-1]
    jo = output.numpy().reshape(-1).shape[-1]
    NJ = np.zeros((jo, ji), dtype=np.float32)

    for i in range(ji):
        eps_perturb = mask_like(input.numpy(), i, mask_value=eps)

        output_perturb_add = func(Tensor(input.numpy() + eps_perturb)).numpy().reshape(-1)
        output_perturb_sub = func(Tensor(input.numpy() - eps_perturb)).numpy().reshape(-1)

        grad_approx = (output_perturb_add - output_perturb_sub) / (2 * eps)

        NJ[:, i] = grad_approx
    return NJ


def gradcheck(func, input, eps=1e-3, atol=1e-3, rtol=1e-3):
    # Compare the backprop Jacobian against the numerical estimate.
    NJ = numerical_jacobian(func, input, eps)
    J = jacobian(func, input)
    return np.allclose(J, NJ, atol=atol, rtol=rtol)
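
A minimal usage sketch for these helpers (hypothetical, not part of the commit; it assumes the edugrad ops exercised elsewhere in this changeset: Tensor(ndarray), .dot(), .relu(), .numpy()):

import numpy as np
from edugrad.tensor import Tensor
from tests.gradcheck import gradcheck, jacobian, numerical_jacobian

# Hypothetical example: check the gradient of relu(x . W) w.r.t. x.
W = Tensor(np.random.randn(10, 5).astype(np.float32))
x = Tensor(np.random.randn(1, 10).astype(np.float32))

def func(t):
    return t.dot(W).relu()

J = jacobian(func, x)             # (5, 10); row o is the gradient of output o
NJ = numerical_jacobian(func, x)  # (5, 10); central-difference estimate
print(gradcheck(func, x, eps=1e-3, atol=1e-3, rtol=1e-3))  # True if they agree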
2 changes: 0 additions & 2 deletions tests/test_dummy.py

This file was deleted.

17 changes: 4 additions & 13 deletions tests/test_tensor.py
@@ -1,11 +1,11 @@
import numpy as np
import torch
import unittest, copy
import mmap
from edugrad import Tensor
from edugrad.helpers import dtypes
#from tinygrad.helpers import temp
#from extra.gradcheck import numerical_jacobian, jacobian, gradcheck

from tests.gradcheck import numerical_jacobian, jacobian, gradcheck

x_init = np.random.randn(1,3).astype(np.float32)
U_init = np.random.randn(3,3).astype(np.float32)
@@ -99,16 +99,7 @@ def test_nograd(self):
        assert mm.grad is not None
        assert W.grad is not None

"""
def test_dropout(self):
with Tensor.train():
n, rate = 1_000_000, 0.1
w = Tensor.ones(n).dropout(rate)
non_zeros = np.count_nonzero(w.numpy())
expected = n * (1 - rate)
np.testing.assert_allclose(non_zeros, expected, rtol=2e-3)

def test_jacobian(self):
W = np.random.RandomState(42069).random((10, 5)).astype(np.float32)
x = np.random.RandomState(69420).random((1, 10)).astype(np.float32)
@@ -139,7 +130,7 @@ def tiny_func(x): return x.dot(tiny_W).relu().log_softmax()

        # coarse approximation: a "big" eps and the non-linearities
        # of the model make the numerical check fail
        self.assertFalse(gradcheck(tiny_func, tiny_x, eps=1e-5))
    """



