import math
import unittest
import numpy as np
import torch
from tinygrad.tensor import Tensor
import tinygrad.nn as nn
from tinygrad.helpers import dtypes
from functools import partial

# https://gist.github.com/devries/11405101
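# Kolmogorov-Smirnov probability Q_KS, approximated by the series
# 2 * sum_{j=1..inf} (-1)^(j-1) * exp(-2 * j^2 * a^2), truncated once the terms vanish.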
def ksprob(a):
    fac, total, termbf = 2.0, 0.0, 0.0
    a2 = -2.0 * a * a
    for j in range(1, 101):
        term = fac * math.exp(a2 * j * j)
        total += term
        if math.fabs(term) <= 0.001 * termbf or math.fabs(term) <= 1e-8 * total:
            return total
        fac = -fac
        termbf = math.fabs(term)
    return 1.0
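
# Two-sample Kolmogorov-Smirnov test: returns the p-value for the null
# hypothesis that samples l1 and l2 come from the same distribution.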
def kstest(l1, l2):
    n1, n2 = len(l1), len(l2)
    l1.sort()
    l2.sort()
    j1, j2, d, fn1, fn2 = 0, 0, 0.0, 0.0, 0.0
    while j1 < n1 and j2 < n2:
        d1, d2 = l1[j1], l2[j2]
        if d1 <= d2:
            fn1 = (float(j1) + 1.0) / float(n1)
            j1 += 1
        if d2 <= d1:
            fn2 = (float(j2) + 1.0) / float(n2)
            j2 += 1
        dtemp = math.fabs(fn2 - fn1)
        if dtemp > d:
            d = dtemp
    ne = float(n1 * n2) / float(n1 + n2)
    nesq = math.sqrt(ne)
    prob = ksprob((nesq + 0.12 + 0.11 / nesq) * d)
    return prob
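
# Seeds tinygrad, torch, and numpy identically, draws samples from tiny_func,
# and checks with the KS test (at significance level alpha) that they match
# the torch and/or numpy reference samples.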
def equal_distribution(
    tiny_func, torch_func=None, numpy_func=None, shape=(20, 23), alpha=0.05
):
    Tensor.manual_seed(1337)
    torch.manual_seed(1337)
    np.random.seed(1337)
    assert not (
        torch_func is None and numpy_func is None
    ), "no function to compare with"
    x = tiny_func(*shape).numpy().flatten()
    if numpy_func is not None:
        y = numpy_func(shape).flatten()
    if torch_func is not None:
        z = torch_func(shape).numpy().flatten()
    return (numpy_func is None or kstest(x, y) >= alpha) and (
        torch_func is None or kstest(x, z) >= alpha
    )
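
# Checks that func produces samples consistent with a standard normal distribution.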
def normal_test(func, shape=(20, 23), alpha=0.05):
    return equal_distribution(
        func, numpy_func=lambda x: np.random.randn(*x), shape=shape, alpha=alpha
    )

class TestRandomness(unittest.TestCase):
    # rand is uniform, so it should fail the normality test but match
    # torch.rand and np.random.rand
    def test_rand(self):
        self.assertFalse(normal_test(Tensor.rand))
        self.assertTrue(
            equal_distribution(Tensor.rand, torch.rand, lambda x: np.random.rand(*x))
        )

    def test_randn(self):
        self.assertTrue(normal_test(Tensor.randn))
        self.assertTrue(
            equal_distribution(Tensor.randn, torch.randn, lambda x: np.random.randn(*x))
        )

    def test_normal(self):
        self.assertTrue(normal_test(Tensor.normal))
        self.assertTrue(
            equal_distribution(
                Tensor.normal,
                lambda x: torch.nn.init.normal_(torch.empty(x), mean=0, std=1),
                lambda x: np.random.normal(loc=0, scale=1, size=x),
            )
        )

    def test_uniform(self):
        self.assertFalse(normal_test(Tensor.uniform))
        self.assertTrue(
            equal_distribution(
                Tensor.uniform,
                lambda x: torch.nn.init.uniform_(torch.empty(x)),
                lambda x: np.random.uniform(size=x),
            )
        )
        self.assertTrue(
            equal_distribution(
                partial(Tensor.uniform, low=-100, high=100, dtype=dtypes.int32),
                numpy_func=lambda x: np.random.randint(low=-100, high=100, size=x),
            )
        )

    def test_scaled_uniform(self):
        self.assertFalse(normal_test(Tensor.scaled_uniform))
        self.assertTrue(
            equal_distribution(
                Tensor.scaled_uniform,
                lambda x: torch.nn.init.uniform_(torch.empty(x), a=-1, b=1)
                / math.sqrt(math.prod(x)),
                lambda x: np.random.uniform(-1, 1, size=x) / math.sqrt(math.prod(x)),
            )
        )

    def test_glorot_uniform(self):
        self.assertFalse(normal_test(Tensor.glorot_uniform))
        self.assertTrue(
            equal_distribution(
                Tensor.glorot_uniform,
                lambda x: torch.nn.init.xavier_uniform_(torch.empty(x)),
                lambda x: np.random.uniform(-1, 1, size=x)
                * math.sqrt(6 / (x[0] + math.prod(x[1:]))),
            )
        )

    def test_kaiming_uniform(self):
        Tensor.manual_seed(1337)
        torch.manual_seed(1337)
        np.random.seed(1337)
        for shape in [(128, 64, 3, 3), (20, 24)]:
            self.assertTrue(
                equal_distribution(
                    Tensor.kaiming_uniform,
                    lambda x: torch.nn.init.kaiming_uniform_(torch.empty(x)),
                    shape=shape,
                )
            )

    def test_kaiming_normal(self):
        Tensor.manual_seed(1337)
        torch.manual_seed(1337)
        np.random.seed(1337)
        for shape in [(128, 64, 3, 3), (20, 24)]:
            self.assertTrue(
                equal_distribution(
                    Tensor.kaiming_normal,
                    lambda x: torch.nn.init.kaiming_normal_(torch.empty(x)),
                    shape=shape,
                )
            )

    def test_multinomial(self):
        self.assertRaises(
            AssertionError, lambda: Tensor(2).multinomial(1, replacement=False)
        )
        self.assertRaises(
            AssertionError, lambda: Tensor([1, 9]).multinomial(0, replacement=False)
        )

        # compare each row of samples against torch via the KS test
        def _check_with_torch(w, num_samples, replacement):
            tiny_res = Tensor(w).multinomial(num_samples, replacement=replacement)
            torch_res = torch.tensor(w).multinomial(
                num_samples, replacement=replacement
            )
            self.assertEqual(tiny_res.shape, torch_res.shape)
            if torch_res.ndim == 1:
                tiny_res = tiny_res.unsqueeze(0)
                torch_res = torch_res.unsqueeze(0)
            for i in range(torch_res.shape[0]):
                self.assertTrue(
                    equal_distribution(lambda *_: tiny_res[i], lambda _: torch_res[i])
                )

        _check_with_torch(w=[0.231, 0.0, 1.0, 0.5], num_samples=2000, replacement=True)
        _check_with_torch(
            w=[[0.2, 0.8]], num_samples=2000, replacement=True
        )  # 2D but only 1 row
        _check_with_torch(
            w=[[0.453, 0.0, 1.0, 0.81], [0.1, 0.8, 0.0, 0.1]],
            num_samples=2000,
            replacement=True,
        )
        # no-replacement isn't supported, unless taking only one sample
        w = [0.1, 0.9]
        self.assertRaises(
            AssertionError, lambda: Tensor(w).multinomial(100, replacement=False)
        )
        tiny_samples = [
            Tensor(w).multinomial(1, replacement=False).numpy().item()
            for _ in range(1000)
        ]
        torch_samples = [
            torch.tensor(w).multinomial(1, replacement=False).item()
            for _ in range(1000)
        ]
        self.assertTrue(
            equal_distribution(
                lambda *_: Tensor(tiny_samples), lambda _: torch.tensor(torch_samples)
            )
        )

    def test_multinomial_counterexample(self):
        tiny_res = Tensor([0.3, 0.6, 0.1]).multinomial(2000, replacement=True)
        torch_res = torch.tensor([0.3, 0.6, 0.1]).multinomial(2000, replacement=True)
        self.assertTrue(equal_distribution(lambda *_: tiny_res, lambda _: torch_res))
        # sanity check: sampling from different weights should be rejected
        torch_res = torch.tensor([0.2, 0.7, 0.1]).multinomial(2000, replacement=True)
        self.assertFalse(equal_distribution(lambda *_: tiny_res, lambda _: torch_res))

    def test_conv2d_init(self):
        params = (128, 256, (3, 3))
        assert equal_distribution(
            lambda *_: nn.Conv2d(*params).weight,
            lambda _: torch.nn.Conv2d(*params).weight.detach(),
        )
        assert equal_distribution(
            lambda *_: nn.Conv2d(*params).bias,
            lambda _: torch.nn.Conv2d(*params).bias.detach(),
        )

    def test_linear_init(self):
        params = (64, 64)
        assert equal_distribution(
            lambda *_: nn.Linear(*params).weight,
            lambda _: torch.nn.Linear(*params).weight.detach(),
        )
        assert equal_distribution(
            lambda *_: nn.Linear(*params).bias,
            lambda _: torch.nn.Linear(*params).bias.detach(),
        )

    def test_bn_init(self):
        params = (64,)
        assert equal_distribution(
            lambda *_: nn.BatchNorm2d(*params).weight,
            lambda _: torch.nn.BatchNorm2d(*params).weight.detach(),
        )
        assert equal_distribution(
            lambda *_: nn.BatchNorm2d(*params).bias,
            lambda _: torch.nn.BatchNorm2d(*params).bias.detach(),
        )

if __name__ == "__main__":
    unittest.main()