Apply ruff linting rules to tests (#2473)

* everything except F821

* enable F821 with noqa

* dumb fix

* fix remaining imports and (former) lambdas

* replace _ with noqa to avoid gc
Branch: pull/2485/head
Author: Christopher Mauri Milan, 2023-11-27 21:24:06 -08:00 (committed by GitHub)
parent 136dbd8b36
commit 7f01dd04f0
32 changed files with 46 additions and 70 deletions
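
The last commit-message bullet ("replace _ with noqa to avoid gc") refers to a CPython detail worth spelling out: binding a throwaway result to `_` keeps a strong reference alive until the name is rebound or goes out of scope, which can defeat checks (like the check_gc() calls in the allocator tests below) that expect the object to have been collected. A minimal sketch of the difference, using a hypothetical Buffer class rather than tinygrad code:

```python
import gc, weakref

class Buffer: pass  # stand-in for any object whose collection a test wants to observe

def with_binding():
  _ = Buffer()                 # `_` quiets "unused" complaints but still holds a reference
  ref = weakref.ref(_)
  gc.collect()
  assert ref() is not None     # the object survives: the `_` binding pins it

def without_binding():
  ref = weakref.ref(Buffer())  # no name holds the Buffer, so CPython frees it immediately
  gc.collect()
  assert ref() is None

with_binding(); without_binding()
```

Hence the tests drop unused bindings entirely, and where a binding is kept for readability they append `# noqa: F841` instead of assigning to `_`.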

@@ -1,4 +1,4 @@
tab-size = 2
indent-width = 2
select = [
"F",
@@ -28,14 +28,3 @@ exclude = [
"openpilot/",
]
[per-file-ignores]
"test/*" = [
"F401",
"F403",
"F405",
"F541",
"E722",
"E731",
"F821",
"F841",
]
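
For readers unfamiliar with the codes, the test/* ignore list above maps to standard pyflakes (F) and pycodestyle (E) checks. A hypothetical snippet, not from the repo, that would trip each one:

```python
import os                      # F401: `os` imported but unused
from math import *             # F403: `import *` used
print(sqrt(2))                 # F405: `sqrt` may be undefined, or defined from star imports

def example():
  title = f"static text"       # F541: f-string without any placeholders (also F841: `title` unused)
  add = lambda a, b: a + b     # E731: lambda assigned to a name; ruff wants a `def`
  try:
    print(missing_name)        # F821: undefined name `missing_name`
  except:                      # E722: bare `except`
    pass
  return add(1, 2)
```

Most of these are fixed outright in the test changes below; F821 and F841 are suppressed with targeted `# noqa` comments where the flagged code is intentional.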

@@ -72,7 +72,7 @@ def benchmark_model(m, validate_outs=False):
from tinygrad.jit import TinyJit
tinygrad_jitted_model = TinyJit(lambda **kwargs: {k:v.realize() for k,v in tinygrad_model(kwargs).items()})
for _ in range(3): {k:v.numpy() for k,v in tinygrad_jitted_model(**inputs).items()}
benchmark(m, f"tinygrad_{device.lower()}_jit", lambda: {k:v.numpy() for k,v in tinygrad_jitted_model(**inputs).items()})
benchmark(m, f"tinygrad_{device.lower()}_jit", lambda: {k:v.numpy() for k,v in tinygrad_jitted_model(**inputs).items()}) # noqa: F821
del inputs, tinygrad_model, tinygrad_jitted_model
try:

@@ -76,7 +76,7 @@ def helper_test_alloc_count(mm, gen, train):
FAKE_GLOBAL_ALLOCATOR = FakeAllocator(0)
old_allocs = __helper_test_alloc_count(gen, train)
print(f"{mm}: llama: old allocs count {old_allocs}, new allocs count {new_allocs}")
assert new_allocs < old_allocs, f"Hmm, doesn't cache work any more?"
assert new_allocs < old_allocs, "Hmm, doesn't cache work any more?"
Device[Device.DEFAULT].runtime = backup_program
Device[Device.DEFAULT].buffer = backup_buffer
FAKE_GLOBAL_ALLOCATOR = None

@@ -4,7 +4,6 @@ import numpy as np
from tinygrad.tensor import Tensor
from tinygrad.jit import TinyJit
from tinygrad.helpers import dtypes, CI
from tinygrad import Device
from test.helpers import derandomize_model
from examples.llama import Transformer

@@ -85,7 +85,6 @@ class TestInferenceMinKernels(unittest.TestCase):
def test_llama(self):
from examples.llama import Transformer
from tinygrad.shape.symbolic import Variable
args_tiny = {"dim": 512, "hidden_dim": 1024, "n_heads": 8, "n_layers": 4, "norm_eps": 1e-05, "vocab_size": 1000}
model = Transformer(**args_tiny)
for p in get_parameters(model): p.assign(np.zeros(p.shape, dtype=p.dtype.np))
@@ -148,7 +147,7 @@ class TestOptWChild(unittest.TestCase):
with CLCache():
c = (a*b).sum()
d = c+1
e = c+2
e = c+2 # noqa: F841
d.realize()
assert len(CacheCollector.cache) == 2, "don't fuse if you have children"
@@ -240,9 +239,9 @@ class TestOpt(unittest.TestCase):
c1 = nn.Conv2d(3,32,3)
bn = nn.BatchNorm2d(32, track_running_stats=False)
# precache the bn
img_conv = bn(c1(img)).relu().realize()
bn(c1(img)).relu().realize()
with CLCache():
img_conv = bn(c1(img)).relu().realize()
bn(c1(img)).relu().realize()
assert len(CacheCollector.cache) == 1, f"optimizer didn't fold conv-batchnorm at test time, got {len(CacheCollector.cache)}"
def test_fold_conv_batchnorm(self):

@@ -1,12 +1,8 @@
import io
import unittest
from pathlib import Path
import cv2
import requests # type: ignore
import numpy as np
from tinygrad.tensor import Tensor
from examples.yolov3 import Darknet, infer, show_labels
from extra.utils import fetch

@@ -1,9 +1,9 @@
import numpy as np
from extra.utils import fetch, download_file, get_child
from extra.utils import fetch, download_file
from examples.yolov8 import YOLOv8, get_variant_multiples, preprocess, postprocess, label_predictions
from pathlib import Path
import unittest
import io, cv2, os
import io, cv2
import onnxruntime as ort
import ultralytics
from tinygrad.nn.state import safe_load, load_state_dict

@@ -21,7 +21,7 @@ def run_linearizer(lin: Linearizer, rawbufs=None, var_vals=None):
prg = device.to_program(lin)
else:
prg = device.get_runner(lin.ast)
except:
except Exception:
print(lin.ast)
traceback.print_exc()
print("COMPILE FAILED!!")
@@ -29,7 +29,7 @@ def run_linearizer(lin: Linearizer, rawbufs=None, var_vals=None):
try:
prg.exec(rawbufs, var_vals)
except:
except Exception:
print(lin.ast)
traceback.print_exc()
print("EXEC FAILED!!")

@@ -1,5 +1,4 @@
import unittest
import numpy as np
from tinygrad import Device
from tinygrad.tensor import Tensor
from tinygrad.helpers import getenv, CI

@@ -2,7 +2,6 @@
import unittest
import numpy as np
from tinygrad.tensor import Tensor
from tinygrad import Device
import torch
def get_question_samp(bsz, seq_len, vocab_size, seed):

@@ -1,6 +1,5 @@
import ast
import pathlib
import sys
import unittest
import numpy as np

@@ -2,7 +2,7 @@
import unittest
import numpy as np
from tinygrad.nn.state import get_parameters
from tinygrad.tensor import Tensor, Device
from tinygrad.tensor import Tensor
from tinygrad.nn import optim, BatchNorm2d
from extra.training import train, evaluate
from extra.datasets import fetch_mnist

@@ -3,7 +3,6 @@ import pathlib
import unittest
import numpy as np
from tinygrad.tensor import Tensor
from tinygrad import Device
class TestVGG7(unittest.TestCase):
def test_vgg7(self):

@@ -121,11 +121,11 @@ class TestAllocators(unittest.TestCase):
assert lru_allocator.free_space['0'] == 128 - 24, "24 bytes to be used by current cached buffers"
def always_raise_exception(*args, **kwargs):
raise Exception("OOM")
raise MemoryError("OOM")
lru_allocator._do_alloc = always_raise_exception
with pytest.raises(Exception):
buff = alloc(lru_allocator, 5, dtypes.float32, device='0')
alloc(lru_allocator, 5, dtypes.float32, device='0')
assert len(lru_allocator.aging_order['0']) == 0, "All buffers should be freed from cache due to failing alloc"
test()
check_gc()
@@ -142,10 +142,10 @@ class TestAllocators(unittest.TestCase):
original_do_alloc = lru_allocator._do_alloc # save the original method
def single_fail_then_pass(*args, **kwargs):
lru_allocator._do_alloc = original_do_alloc # restore the original method
raise Exception("OOM")
raise MemoryError("OOM")
lru_allocator._do_alloc = single_fail_then_pass
buff = alloc(lru_allocator, 5, dtypes.float32, device='0')
alloc(lru_allocator, 5, dtypes.float32, device='0')
assert len(lru_allocator.aging_order['0']) < cache_length, "Some buffers should be cleaned as first alloc failed"
test()
check_gc()
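
The two allocator tests above simulate an out-of-memory condition (now raised as MemoryError rather than a bare Exception) and check that cached buffers get evicted when the raw allocation fails. A minimal sketch of that eviction-and-retry pattern, with illustrative names rather than tinygrad's actual LRUAllocator API:

```python
class CachingAllocator:
  def __init__(self, raw_alloc):
    self._do_alloc = raw_alloc    # underlying allocation callable (may raise MemoryError)
    self.aging_order = []         # cached buffers, oldest first

  def alloc(self, size):
    while True:
      try:
        return self._do_alloc(size)
      except MemoryError:
        if not self.aging_order:
          raise                   # nothing left to evict, propagate the OOM
        self.aging_order.pop(0)   # evict the oldest cached buffer, then retry

# fail once, then succeed: some (here, one) cached buffer gets evicted
calls = iter([True, False])
def raw_alloc(size):
  if next(calls): raise MemoryError("OOM")
  return bytearray(size)

a = CachingAllocator(raw_alloc)
a.aging_order = ["bufA", "bufB", "bufC"]
a.alloc(16)
assert len(a.aging_order) < 3
```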

@@ -42,11 +42,11 @@ class TestAssign(unittest.TestCase):
a.realize()
b.realize()
#GlobalCounters.cache = []
ba1 = a.lazydata.realized
bb1 = b.lazydata.realized
ba1 = a.lazydata.realized # noqa: F841
bb1 = b.lazydata.realized # noqa: F841
a.assign(a.permute(1,0) + b) # this should not work!
a.realize()
ba2 = a.lazydata.realized
ba2 = a.lazydata.realized # noqa: F841
# NOTE: don't test that it's assigned
#assert ba1 == ba2 and ba1 != bb1
np.testing.assert_allclose(a.numpy(), np.arange(N*N).reshape((N,N)) + np.arange(N*N).reshape((N,N)).transpose(1,0))

@@ -18,7 +18,7 @@ class TestConv(unittest.TestCase):
def test_simple_rand(self):
x = Tensor.rand(1,12,128,256)
w = Tensor.rand(32,12,3,3)
ret = x.conv2d(w, stride=(2,2), padding=(1,1)).numpy()
x.conv2d(w, stride=(2,2), padding=(1,1)).numpy()
def test_many_simple(self):
x = Tensor(np.arange(8*2*8).reshape(1,8,2,8).astype(np.float32))
@@ -106,14 +106,14 @@ class TestConv(unittest.TestCase):
w = Tensor.rand(32,1,3,3)
x = x.conv2d(w, padding=(1,1), groups=32)
out = x.numpy()
x.numpy()
Tensor.no_grad = False
def test_reduce_relu(self):
Tensor.no_grad = True
x = Tensor.rand(1,12,128,256)
x = x.sum(keepdim=True).relu()
out = x.numpy()
x.numpy()
Tensor.no_grad = False
def test_bias(self):
@@ -124,7 +124,7 @@ class TestConv(unittest.TestCase):
x = c(x).relu()
w = Tensor.uniform(32, 1, 3, 3)
x = x.conv2d(w, groups=32)
out = x.numpy()
x.numpy()
Tensor.no_grad = False
def test_multiadd(self):

@@ -36,7 +36,7 @@ class TestCopySpeed(unittest.TestCase):
print("fresh copy")
for _ in range(3):
t = Tensor.rand(N, N, device="cpu").realize()
with Timing("sync: ", on_exit=lambda ns: f" @ {t.nbytes()/ns:.2f} GB/s"):
with Timing("sync: ", on_exit=lambda ns: f" @ {t.nbytes()/ns:.2f} GB/s"): # noqa: F821
with Timing("queue: "):
t.to(Device.DEFAULT).realize()
Device[Device.DEFAULT].synchronize()
@@ -60,7 +60,7 @@ class TestCopySpeed(unittest.TestCase):
with Timing("queue: "):
for g in range(6):
t.to(f"gpu:{g}").realize()
Device[f"gpu"].synchronize()
Device["gpu"].synchronize()
if __name__ == '__main__':
unittest.main()

@@ -34,7 +34,7 @@ def atan2_cpu(ret:LazyBuffer, a:LazyBuffer, b:LazyBuffer):
# NOTE: The derivative of atan2 doesn't need a custom op! https://www.liquisearch.com/atan2/derivative
# In general, it is also optional to write a backward function, just your backward pass won't work without it
from tinygrad.ops import LazyOp, LoadOps, BinaryOps, UnaryOps
from tinygrad.ops import LazyOp, LoadOps, BinaryOps
from tinygrad.lazy import LazyBuffer
from tinygrad.tensor import Function

@@ -35,7 +35,7 @@ class TestJit(unittest.TestCase):
for _ in range(5):
a = Tensor.randn(10, 10)
b = Tensor.randn(10, 10)
c = add(a, b)
add(a, b)
def test_jit_shape_mismatch(self):
@TinyJit
@@ -43,7 +43,7 @@ class TestJit(unittest.TestCase):
for _ in range(5):
a = Tensor.randn(10, 10)
b = Tensor.randn(10, 10)
c = add(a, b)
add(a, b)
bad = Tensor.randn(20, 20)
with self.assertRaises(AssertionError):
add(a, bad)

@@ -4,7 +4,6 @@ import unittest
from tinygrad.lazy import LazyBuffer
from tinygrad import Device
from tinygrad.tensor import Tensor
from tinygrad.shape.symbolic import Variable
from tinygrad.jit import CacheCollector
class TestLazyBuffer(unittest.TestCase):
@@ -50,7 +49,7 @@ class TestLazyBuffer(unittest.TestCase):
def test_children_count(self):
a = Tensor.ones(8,8,8)
d1 = a.sum((0))
d2 = a.sum((0)).reshape(32,2)
d2 = a.sum((0)).reshape(32,2) # noqa: F841
assert len(d1.lazydata.op.src[0].children) == 1
in1 = d1.reshape(16,4)
d3 = in1.reshape(8,8)

@@ -2,6 +2,7 @@ import unittest
from tinygrad.tensor import Tensor
# stuff needed to unpack a kernel
# ruff: noqa: F401
from tinygrad.ops import LazyOp, TernaryOps, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer
from tinygrad.lazy import LazyBuffer
from tinygrad.helpers import dtypes

@@ -6,11 +6,10 @@ from tinygrad.helpers import OSX, CI
from test.external.fuzz_linearizer import run_linearizer
# stuff needed to unpack a kernel
from tinygrad.ops import LazyOp, TernaryOps, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer
from tinygrad.ops import LazyOp, BinaryOps, UnaryOps, ReduceOps, BufferOps, MemBuffer, ConstBuffer
from tinygrad.helpers import dtypes
from tinygrad.shape.shapetracker import ShapeTracker
from tinygrad.shape.view import View
from tinygrad.shape.symbolic import Variable
inf, nan = float('inf'), float('nan')
def helper_test_lin(lin: Linearizer, opts, failed_platforms):

@@ -264,6 +264,7 @@ class TestNN(unittest.TestCase):
z = layer(x)
torch_x = torch.tensor(x.numpy())
torch_z = torch_layer(torch_x.permute(0,2,3,1)).permute(0,3,1,2)
np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3)
def test_instancenorm_2d(self):
N, C, H, W = 20, 5, 10, 10
@@ -283,7 +284,6 @@ class TestNN(unittest.TestCase):
torch_x = torch.tensor(x.numpy())
torch_z = torch_layer(torch_x)
np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3)
np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3)
def test_instancenorm_3d(self):
N, C, D, H, W = 20, 5, 3, 10, 10
@@ -303,7 +303,6 @@ class TestNN(unittest.TestCase):
torch_x = torch.tensor(x.numpy())
torch_z = torch_layer(torch_x)
np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3)
np.testing.assert_allclose(z.numpy(), torch_z.detach().numpy(), atol=5e-3, rtol=5e-3)
def test_embedding(self):
B, T, C, VS = 4, 10, 20, 28

@@ -910,7 +910,7 @@ class TestOps(unittest.TestCase):
@unittest.skipIf(IMAGE>0, "no conv1d on images")
def test_asymmetric_padding_conv1d(self):
for p in [(0,1), (2,1), (2,0)]:
with self.subTest(padding := p):
with self.subTest(p):
for n in [3,4]:
for k in [2]:
helper_test_op([(1,1,n), (1,1,k)],
@@ -1026,7 +1026,7 @@ class TestOps(unittest.TestCase):
def test_asymmetric_padding_conv2d(self):
for p in [(0,1,0,1), (2,1,2,1), (2,0,2,1)]:
with self.subTest(padding := p):
with self.subTest(p):
for n in [3,4]:
for k in [2]:
helper_test_op([(1,1,n,n), (1,1,k,k)],

@@ -238,7 +238,7 @@ class TestSchedule(unittest.TestCase):
b = Tensor.empty(10)
c = Tensor.empty(10)
keep_me = a+b
e = keep_me.sum() # give keep_me a child (NOTE: BinaryOps won't be a child since it will instant fuse)
e = keep_me.sum() # noqa: F841 give keep_me a child (NOTE: BinaryOps won't be a child since it will instant fuse)
d = keep_me+c
check_schedule(d, 2)
check_schedule(keep_me, 0, [d])

@@ -25,12 +25,12 @@ torch_dt = torch.float16 if getenv("HALF", 0) else torch.float32
torch_device = torch.device('mps' if getenv("MPS", 0) else ('cuda' if getenv("TORCHCUDA", 0) else 'cpu'))
if str(torch_device) == "mps":
import torch.mps
sync = lambda: torch.mps.synchronize()
def sync(): torch.mps.synchronize()
elif str(torch_device) == "cuda":
import torch.cuda
sync = lambda: torch.cuda.synchronize()
def sync(): torch.cuda.synchronize()
else:
sync = lambda: None
def sync(): pass
def colorize_float(x):
ret = f"{x:7.2f}x"

@@ -157,7 +157,7 @@ class TestSymbolicJit(unittest.TestCase):
vi = Variable("i", 1, 10).bind(i)
a = Tensor.rand(3, i).reshape(3, vi)
b = Tensor.rand(3, i).reshape(3, vi)
c = add(a, b)
add(a, b)
vi2 = Variable("i", 1, 10).bind(7)
a = Tensor.rand(3, 7).reshape(3, vi2)
bad = Tensor.rand(4, 7).reshape(4, vi2)

@@ -116,9 +116,9 @@ class TestSymbolicReshape(unittest.TestCase):
def test_reshape_into_symbols_bad_shape(self):
vi = Variable("i", 1, 10).bind(4)
with self.assertRaises(ValueError):
t = Tensor.rand(4, 6).reshape(vi, 6).reshape(1, 77) # reshape to a different size new shape through symbolic shape
Tensor.rand(4, 6).reshape(vi, 6).reshape(1, 77) # reshape to a different size new shape through symbolic shape
with self.assertRaises(AssertionError):
t = Tensor.rand(3, 4).reshape(3, (vi+1)) # reshape into non-Variable Node
Tensor.rand(3, 4).reshape(3, (vi+1)) # reshape into non-Variable Node
def test_two_symbol_reshape(self):
for i in range(1, 6):

@@ -112,12 +112,12 @@ class TestTinygrad(unittest.TestCase):
torch_x = torch.tensor(x, requires_grad=True)
torch_W = torch.tensor(W, requires_grad=True)
torch_func = lambda x: torch.nn.functional.log_softmax(x.matmul(torch_W).relu(), dim=1)
def torch_func(x): return torch.nn.functional.log_softmax(x.matmul(torch_W).relu(), dim=1)
PJ = torch.autograd.functional.jacobian(torch_func, torch_x).squeeze().numpy()
tiny_x = Tensor(x, requires_grad=True)
tiny_W = Tensor(W, requires_grad=True)
tiny_func = lambda x: x.dot(tiny_W).relu().log_softmax()
def tiny_func(x): return x.dot(tiny_W).relu().log_softmax()
J = jacobian(tiny_func, tiny_x)
NJ = numerical_jacobian(tiny_func, tiny_x)
@@ -130,7 +130,7 @@ class TestTinygrad(unittest.TestCase):
tiny_x = Tensor(x, requires_grad=True)
tiny_W = Tensor(W, requires_grad=True)
tiny_func = lambda x: x.dot(tiny_W).relu().log_softmax()
def tiny_func(x): return x.dot(tiny_W).relu().log_softmax()
self.assertTrue(gradcheck(tiny_func, tiny_x, eps = 1e-3))

@@ -1,4 +1,3 @@
from tinygrad import Device
from tinygrad.tensor import Tensor
import numpy as np
import pickle

@@ -1,4 +1,4 @@
import unittest, io
import unittest
import numpy as np
from PIL import Image
from tinygrad.helpers import Context, ContextVar, DType, dtypes, merge_dicts, strip_parens, prod, round_up, fetch

@@ -1,6 +1,6 @@
#!/usr/bin/env python
import unittest
from tinygrad.shape.symbolic import Node, MulNode, SumNode, Variable, NumNode, LtNode, sym_render, sym_infer, create_rednode
from tinygrad.shape.symbolic import MulNode, SumNode, Variable, NumNode, LtNode, sym_render, sym_infer, create_rednode
class TestSymbolic(unittest.TestCase):
def helper_test_variable(self, v, n, m, s):
@@ -400,7 +400,7 @@ class TestSymbolicSymbolicOps(unittest.TestCase):
c = Variable("c", 1, 2)
x = SumNode([MulNode(a, b), c])
with self.assertRaises(AssertionError):
lt3 = (x < 3)
(x < 3)
def test_nested_variable_mod(self):
i = Variable("i", 1, 5)