"""
Welcome to the tinygrad documentation
=================
this file will take you on a whirlwind journey from a Tensor all the way down
tinygrad has been aggressively refactored in the 2.5 years it's been worked on.
what you see here is a refined library (with more refining to go still!)
the whole tinygrad is ~2300 lines, so while it's readable in an evening or two,
this documentation will help with entry points and understanding the abstraction stack
"""
# %%
# == Boilerplate imports for typing ==
from __future__ import annotations
from typing import Optional, Tuple, Union, Any, Dict, Callable, Type, List, ClassVar
from enum import Enum, auto
from abc import ABC
# %%
# == Example: Tensor 2+3 ==
# let's trace an addition down through the layers of abstraction.
# we will be using the clang backend
from tinygrad.lazy import Device
Device.DEFAULT = "CLANG"
# first, 2+3 as a Tensor, the highest level
from tinygrad.tensor import Tensor
a = Tensor([2])
b = Tensor([3])
result = a + b
print(f"{a.numpy()} + {b.numpy()} = {result.numpy()}")
assert result.numpy()[0] == 5.
# %%
# == Tensor (in tinygrad/tensor.py, code 8/10) ==
# it's worth reading tinygrad/tensor.py. it's pretty beautiful
import tinygrad.mlops as mlops
# this is the good old familiar Tensor class
class Tensor:
  # these two are pretty straightforward
  grad: Optional[Tensor]
  requires_grad: Optional[bool]

  # this is the graph for the autograd engine
  _ctx: Optional[Function]

  # this is where the data (and other tensor properties) actually live
  lazydata: LazyBuffer

  # high level ops (hlops) are defined on this class. example: relu
  def relu(self): return self.maximum(0)

  # log is an mlop, this is the wrapper function in Tensor
  def log(self): return mlops.Log.apply(self)

# all the definitions of the derivatives are subclasses of Function (like mlops.Log)
# there are only 18 mlops for derivatives for everything (in tinygrad/mlops.py, code 9/10)
# if you read one file, read mlops.py. if you read two files, also read tinygrad/tensor.py
# you can differentiate the world using the chain rule
class Function:
  # example types of forward and backward
  def forward(self, x:LazyBuffer) -> LazyBuffer: pass
  def backward(self, x:LazyBuffer) -> LazyBuffer: pass
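
# to see the Function/_ctx machinery in action, here's a tiny sketch using the real Tensor
# (re-imported, since the illustrative class above shadows the one from the first cell).
# applying an mlop records the Function that created the output in _ctx; that graph is what
# .backward() later walks with the chain rule
from tinygrad.tensor import Tensor
x = Tensor([2.0], requires_grad=True)
y = x.log()
print(type(y._ctx))   # should print something like <class 'tinygrad.mlops.Log'>
print(x._ctx)         # None: leaf tensors weren't created by a Function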
# %%
# == LazyBuffer (in tinygrad/lazy.py, code 5/10) ==
from tinygrad.helpers import DType

# this is where the properties live that you thought were a part of Tensor
# LazyBuffer is like a Tensor without derivatives, at the mlop layer
class LazyBuffer:
  # these three define the "type" of the buffer, and they are returned as Tensor properties
  device: str
  shape: Tuple[int, ...]
  dtype: DType

  # a ShapeTracker is used to track things like reshapes and permutes
  # all MovementOps are zero copy in tinygrad!
  # the ShapeTracker specifies how the data in the RawBuffer matches to the shape
  # we'll come back to this later
  st: ShapeTracker

  # if the LazyBuffer is realized, it has a RawBuffer
  # we will come back to RawBuffers later
  realized: Optional[RawBuffer]

  # if the lazybuffer is unrealized, it has a LazyOp
  # this LazyOp describes the computation needed to realize this LazyBuffer
  op: Optional[LazyOp]
# LazyOp (in tinygrad/ops.py, code 4/10)
# in a tree they form an Abstract Syntax Tree for a single GPU kernel
class LazyOp:
  op: Op                                       # the type of the compute
  src: Tuple[Union[LazyOp, LazyBuffer], ...]   # the sources
  arg: Optional[Any] = None                    # and an optional static argument
# there are currently 27 Ops you have to implement for an accelerator.
class UnaryOps(Enum): NOOP = auto(); EXP2 = auto(); LOG2 = auto(); CAST = auto(); SIN = auto(); SQRT = auto()
class BinaryOps(Enum): ADD = auto(); SUB = auto(); MUL = auto(); DIV = auto(); CMPEQ = auto(); MAX = auto()
class ReduceOps(Enum): SUM = auto(); MAX = auto()
class MovementOps(Enum): RESHAPE = auto(); PERMUTE = auto(); EXPAND = auto(); PAD = auto(); SHRINK = auto(); STRIDE = auto()
class FusedOps(Enum): MULACC = auto()
class LoadOps(Enum): EMPTY = auto(); RAND = auto(); CONST = auto(); FROM = auto(); CONTIGUOUS = auto(); CUSTOM = auto()

# NOTE: if you have a CompiledBuffer(DeviceBuffer)
# you do not need to implement the MovementOps
# as they are handled by the ShapeTracker (in tinygrad/shape/shapetracker.py, code 7/10)
Op = Union[UnaryOps, BinaryOps, ReduceOps, MovementOps, FusedOps, LoadOps]
# most of tinygrad/lazy.py is concerned with fusing Ops into LazyOp ASTs that map to GPU kernels
# it's beyond the scope of this tutorial, but you can read the file if interested
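
# the three LazyOp fields above are all you need to walk an AST by hand. here's a small
# hypothetical helper (not part of tinygrad) that pretty-prints one; after the next cell you
# can try it with print_ast(result.lazydata.op)
from tinygrad.ops import LazyOp
def print_ast(node, indent=0):
  if isinstance(node, LazyOp):
    print("  "*indent + str(node.op))            # the Op for this node
    for s in node.src: print_ast(s, indent+1)    # recurse into the sources
  else:
    print("  "*indent + f"LazyBuffer(shape={node.shape}, device={node.device})")  # a leaf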
# %%
# == Example: LazyBuffer for 2+3 ==
from tinygrad.tensor import Tensor
from tinygrad.ops import LazyOp, BinaryOps, LoadOps
# the 2+3 from before
# added some 0s, otherwise Tensor([2]) will be folded into a constant without using LoadOps.FROM
result = Tensor([2, 0]) + Tensor([3, 0])
print(type(result.lazydata), result.lazydata) # let's look at the lazydata of result
# you'll see it has a LazyOp
# the op type is BinaryOps.ADD
# and it has two sources, the 2 and the 3
lazyop: LazyOp = result.lazydata.op
assert lazyop.op == BinaryOps.ADD
assert len(lazyop.src) == 2
# the first source is the 2, it comes from the CPU
# the source is a LazyBuffer that is a "CPU" Tensor
# again, a LazyOp AST is like a GPU kernel. you have to copy the data to the device first
assert lazyop.src[0].op.op == LoadOps.FROM
assert lazyop.src[0].op.src[0].device == "CPU"
assert lazyop.src[0].op.src[0].realized._buf[0] == 2, "the src of the FROM LazyOP is a LazyBuffer on the CPU holding [2.]"
assert result.lazydata.realized is None, "the LazyBuffer is not realized yet"
# now we realize the LazyBuffer
result.lazydata.realize()
assert result.lazydata.realized is not None, "the LazyBuffer is realized!"
# this brings us nicely to RawBuffer; on the CLANG backend the realized buffer is a RawMallocBuffer
assert 'RawMallocBuffer' in str(type(result.lazydata.realized))

# getting ahead of ourselves, but we can copy the realized RawBuffer back to the CPU with toCPU
assert result.lazydata.realized.toCPU()[0] == 5, "when put in numpy with toCPU, it's 5"
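
# one more peek at the constant folding mentioned above: with single-element tensors the inputs
# can be folded into constants instead of LoadOps.FROM copies, so the AST looks different
# (just printing here, since the exact ops depend on the tinygrad version)
folded = Tensor([2]) + Tensor([3])
print(folded.lazydata.op)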
# %%
# == Union[Interpreted, Compiled] (in tinygrad/ops.py, code 5/10) ==
# Now you have a choice: you can write either an "Interpreted" backend or a "Compiled" backend

# Interpreted backends are very simple (example: CPU and TORCH)
class Interpreted:
  # they have a backing RawBuffer
  buffer: Type[RawBuffer]

  # and they have a lookup table to functions for the Ops
  fxn_for_op: Dict[Op, Callable] = {
    UnaryOps.EXP2: lambda x: np.exp2(x),
    BinaryOps.ADD: lambda x,y: x+y}
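
# a hand-wavy sketch (NOT tinygrad's real code) of what an Interpreted backend does with an AST:
# recursively evaluate the sources, pull realized leaf data out as numpy, then apply fxn_for_op.
# the real implementation also handles MovementOps, output buffers, etc., and the tiny two-entry
# fxn_for_op above is obviously not enough to run a real AST
def interpret_ast_sketch(node: LazyOp, fxn_for_op: Dict[Op, Callable]):
  srcs = [interpret_ast_sketch(s, fxn_for_op) if isinstance(s, LazyOp) else s.realized.toCPU() for s in node.src]
  return fxn_for_op[node.op](*srcs)
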
# Compiled backends take a little more (example: GPU and LLVM)
class Compiled:
  # they also have a backing RawBuffer
  buffer: Type[RawBuffer]

  # a code generator, which compiles the AST
  codegen: Type[Linearizer]

  # and a runtime, which runs the generated code
  runtime: Type[Runtime]

# Runtime is what actually runs the kernels for a compiled backend
class Runtime(ABC):
  # `name` is the name of the function, and `prg` is the code
  # the constructor compiles the code
  def __init__(self, name:str, prg:str): pass
  # __call__ runs the code on the bufs. NOTE: the output is always bufs[0], but this is just a convention
  def __call__(self, global_size:Optional[List[int]], local_size:Optional[List[int]], *bufs:List[RawBuffer]): pass
# %%
# == RawBuffer (in tinygrad/runtime/lib.py, code 5/10) ==
import numpy as np
# RawBuffer is where the data is actually held. it's pretty close to just memory
class RawBuffer(ABC):
  # create an empty rawbuffer that holds `size` elements of type `dtype`
  # `buf` is an opaque container class
  def __init__(self, size:int, dtype:DType, buf:Any): raise NotImplementedError("must be implemented")

  # fromCPU is a classmethod that creates a RawBuffer; it's a classmethod since some runtimes are zero copy
  @classmethod
  def fromCPU(cls:RawBuffer, x:np.ndarray) -> RawBuffer: raise NotImplementedError("must be implemented")

  # toCPU converts the RawBuffer to a numpy array with shape (size,). many backends are zero copy here
  def toCPU(self) -> np.ndarray: raise NotImplementedError("must be implemented")
# RawNumpyBuffer is a RawBuffer example for numpy. It's very simple
class RawNumpyBuffer(RawBuffer):
  # NOTE: the "np.ndarray" is stored in the opaque container
  def __init__(self, buf:np.ndarray):
    super().__init__(buf.size, dtypes.from_np(buf.dtype), buf)
  @classmethod
  def fromCPU(cls, x): return cls(x)
  def toCPU(self): return self._buf
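
# to make the contract concrete, here is a toy (hypothetical, not in tinygrad) RawBuffer backed
# by a plain python list. real backends wrap device memory instead, but these three methods are
# all you have to provide. (assumes DType carries its numpy type in .np, as tinygrad.helpers' does)
from tinygrad.helpers import dtypes
class RawListBuffer(RawBuffer):
  def __init__(self, size:int, dtype:DType, buf:Optional[list]=None):
    self.size, self.dtype, self._buf = size, dtype, buf if buf is not None else [0]*size
  @classmethod
  def fromCPU(cls, x:np.ndarray): return cls(x.size, dtypes.from_np(x.dtype), x.flatten().tolist())
  def toCPU(self): return np.array(self._buf, dtype=self.dtype.np).reshape(self.size)

# round trip: the data survives CPU -> RawListBuffer -> CPU
np.testing.assert_equal(RawListBuffer.fromCPU(np.arange(4, dtype=np.float32)).toCPU(), np.arange(4, dtype=np.float32))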
# %%
# == Example: 2+3 in raw clang ==
# RawMallocBuffer is the simplest concrete version of RawBuffer (in tinygrad/runtime/lib.py)
# it's used for the CLANG and LLVM backends
# it's just malloc(size * dtype.itemsize)
from tinygrad.runtime.lib import RawMallocBuffer
# ClangProgram is the simplest runtime (in tinygrad/runtime/ops_clang.py, code 7/10)
# __init__ calls clang, and __call__ calls the function in the *.so outputted by clang
# in CLANG, global_size and local_size are ignored
from tinygrad.runtime.ops_clang import ClangProgram, ClangCodegen
# a concrete example looks like this: it adds two size-1 RawBuffers

# first we create two numpy buffers containing 2 and 3
# then we copy the numpy arrays into RawMallocBuffers
# last, we create an empty output buffer
from tinygrad.helpers import dtypes
numpy_a, numpy_b = np.array([2], dtype=np.float32), np.array([3], dtype=np.float32)
input_a, input_b = RawMallocBuffer.fromCPU(numpy_a), RawMallocBuffer.fromCPU(numpy_b)
output = RawMallocBuffer(1, dtypes.float32)
# compile the program, run it, and 2+3 does indeed equal 5
program = ClangProgram("add", f"{ClangCodegen.lang.kernel_prefix} void add(float *a, float *b, float *c) {{ *a = *b + *c; }}")
program(None, None, output, input_a, input_b) # NOTE: the Nones are for global_size and local_size
print(output.toCPU())
assert output.toCPU()[0] == 5, "it's still 5"
np.testing.assert_allclose(output.toCPU(), numpy_a+numpy_b)
# %%
# == Linearizer (in tinygrad/codegen/linearizer.py, code 4/10) ==
# in the above example, we wrote the code by hand
# normally while using tinygrad you don't do that
# the first step of transforming an AST into code is to "linearize" it, think like toposort on the AST
# for that, we use the Linearizer, which turns an AST into a list of (linear) UOps
class UOps(Enum): LOOP = auto(); DEFINE_LOCAL = auto(); LOAD = auto(); ALU = auto(); CONST = auto(); ENDLOOP = auto(); STORE = auto()
class Token:
  name: str
class UOp:
  uop: UOps
  out: Optional[Token]
  vin: List[Token]
  arg: Any
class Linearizer:
  # create the kernel with the AST
  # NOTE: the AST contains the CompiledBuffers themselves as the root nodes. this will change
  def __init__(self, ast:LazyOp): pass
  def process(self): pass
  def linearize(self): pass

  # when linearize is run, it fills in this list
  uops: List[UOp]
from tinygrad.tensor import Tensor
result = Tensor(2) + Tensor(3)
# use the real Linearizer to linearize 2+3
from tinygrad.codegen.linearizer import Linearizer
linearizer = Linearizer(result.lazydata.op, result.lazydata)
linearizer.process()
linearizer.linearize()
# print the uops
for uop in linearizer.uops: print(uop)
# output:
"""
UOps.LOOP : [] ([], 'global')
UOps.LOOP : [] ([], 'local')
UOps.LOAD : <val1_0> [] MemOp(i=1, idx=<0>, valid=<1>)
UOps.LOAD : <val2_0> [] MemOp(i=2, idx=<0>, valid=<1>)
UOps.ALU : <alu0> [<val1_0>, <val2_0>] BinaryOps.ADD
UOps.STORE : [<alu0>] MemOp(i=0, idx=<0>, valid=<1>)
UOps.ENDLOOP : [] ([], 'global+local')
"""
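
# a toy sketch (NOT tinygrad's real CStyleCodegen) of the idea behind the next step: walk the
# linear uop list once and emit a line of pseudo-C per uop. we match on the enum member's name
# so this doesn't depend on where the real UOps enum lives, and only the binary ALU case is handled
def toy_render(uops) -> str:
  lines = []
  for u in uops:
    if u.uop.name == "ALU":     lines.append(f"float {u.out.name} = {u.vin[0].name} /*{u.arg}*/ {u.vin[1].name};")
    elif u.uop.name == "LOAD":  lines.append(f"float {u.out.name} = /* load {u.arg} */;")
    elif u.uop.name == "STORE": lines.append(f"/* store {u.vin[0].name} via {u.arg} */")
    else:                       lines.append(f"/* {u.uop} */")   # LOOP/ENDLOOP/etc left as comments
  return "\n".join(lines)
print(toy_render(linearizer.uops))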
# %%
# == Example: 2+3 autogenerated clang code ==
# to generate clang code, the Linearizer is wrapped with CStyleCodegen
# here, we have an example where we fetch the generated code from the JIT
from tinygrad.tensor import Tensor
result = Tensor(2) + Tensor(3)
# we have a global cache used by the JIT
# from there, we can see the generated clang code
from tinygrad.helpers import GlobalCounters
GlobalCounters.cache = [] # enables the cache
result.realize() # creates the program and runs it
cache_saved = GlobalCounters.cache
GlobalCounters.cache = None # disable the cache
# there's one ASTRunner in the cache
assert len(cache_saved) == 1
prg, bufs = cache_saved[0]
# print the C Program :)
print(prg.prg)
# after some formatting (the compiler doesn't care)
# NOTE: the 2 and 3 are constant folded
"""
void E_1(float* data0) {
  for (int idx0 = 0; idx0 < 1; idx0++) {
    data0[0] = (2.0f) + (3.0f);
  }
}
"""
# %%
# == Example: ShapeTracker (in tinygrad/shape/shapetracker.py, code 7/10) ==
# remember how I said you don't have to write the MovementOps for CompiledBuffers?
# that's all thanks to ShapeTracker!
# ShapeTracker tracks the indices into the RawBuffer
from tinygrad.shape.shapetracker import ShapeTracker
# create a virtual (10, 10) Tensor. this is just a shape, there's no actual tensor
a = ShapeTracker((10, 10))
# you'll see it has one view. the (10, 1) are the strides
print(a) # ShapeTracker(shape=(10, 10), views=[View((10, 10), (10, 1), 0)])
# we can permute it, and the strides change
a.permute((1,0))
print(a) # ShapeTracker(shape=(10, 10), views=[View((10, 10), (1, 10), 0)])
# we can then reshape it, and the strides change again
# note how the permute stays applied
a.reshape((5,2,5,2))
print(a) # ShapeTracker(shape=(5, 2, 5, 2), views=[View((5, 2, 5, 2), (2, 1, 20, 10), 0)])
# now, if we were to reshape it to a (100,) shape tensor, we have to create a second view
a.reshape((100,))
print(a) # ShapeTracker(shape=(100,), views=[
# View((5, 2, 5, 2), (2, 1, 20, 10), 0),
# View((100,), (1,), 0)])
# Views stack on top of each other, to allow zero copy for any number of MovementOps
# we can render a Python expression for the index at any time
idx, _ = a.expr_idxs()
print(idx.render()) # (((idx0%10)*10)+(idx0//10))
# of course, if we reshape it back, the indexes get simple again
a.reshape((10,10))
idx, _ = a.expr_idxs()
print(idx.render()) # ((idx1*10)+idx0)
# the ShapeTracker still has two views though...
print(a) # ShapeTracker(shape=(10, 10), views=[
# View((5, 2, 5, 2), (2, 1, 20, 10), 0),
# View((10, 10), (10, 1), 0)])
# ...until we simplify it!
a.simplify()
print(a) # ShapeTracker(shape=(10, 10), views=[View((10, 10), (1, 10), 0)])
# and now we permute it back
a.permute((1,0))
print(a) # ShapeTracker(shape=(10, 10), views=[View((10, 10), (10, 1), 0)])
# and it's even contiguous
assert a.contiguous == True
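
# to drive home that MovementOps are only index math, here's a plain-numpy check (not tinygrad)
# that the expression rendered earlier, (((idx0%10)*10)+(idx0//10)), really is a transpose:
# gathering a flat 100-element buffer with it matches numpy's .T
buf = np.arange(100, dtype=np.float32)    # stand-in for the untouched RawBuffer
idx0 = np.arange(100)                     # the flat output index
np.testing.assert_equal(buf[(idx0 % 10) * 10 + idx0 // 10].reshape(10, 10), buf.reshape(10, 10).T)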
# %%
# == Example: Variable (in tinygrad/shape/symbolic.py, code 6/10) ==
# Under the hood, ShapeTracker is powered by a small symbolic algebra library
from tinygrad.shape.symbolic import Variable
# Variable is the basic class from symbolic
# it's created with a name and a min and max (inclusive)
a = Variable("a", 0, 10)
b = Variable("b", 0, 10)
# some math examples
print((a*10).min, (a*10).max) # you'll see a*10 has a min of 0 and max of 100
print((a+b).min, (a+b).max) # 0 20, you get the idea
# but complex expressions are where it gets fun
expr = (a + b*10) % 10
print(expr.render()) # (a%10)
# as you can see, b is gone!
# one more
expr = (a*40 + b) // 20
print(expr.render()) # (a*2)
print(expr.min, expr.max) # 0 20
# this is just "(a*2)"
# since b only has a range from 0-10, it can't affect the output
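
# sanity-checking both simplifications above by brute force over the full ranges
# (plain Python, not the symbolic library)
assert all((av + bv*10) % 10 == av % 10 for av in range(11) for bv in range(11))
assert all((av*40 + bv) // 20 == av*2   for av in range(11) for bv in range(11))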
# %%