
Add sin/cos/tan (#794)

* added sin/cos/tan

* fix lint

* added onnx ops support

pull/795/head
Diogo 2023-05-25 12:04:56 -04:00 committed by GitHub
parent 01ae45a43c
commit c19ef0fcce
10 changed files with 37 additions and 9 deletions


@@ -143,6 +143,15 @@ Softmax = {1: Softmax_1, 13: Softmax_13} # Softmax default axis changed
def LogSoftmax(input, axis=-1): return input.log_softmax(axis)
def Clip(input, min=-3.4e38, max=3.4e38): return input.clip(min, max)
import math
def Sin(x): return x.sin()
def Cos(x): return x.cos()
def Tan(x): return x.tan()
def Cosh(x): return (math.e ** x + math.e ** -x) / 2
def Sinh(x): return (math.e ** x - math.e ** -x) / 2
def Tanh(x): return Sinh(x) / Cosh(x)
def Less(x, y): return (x<y).numpy().astype(bool)
def LessOrEqual(x, y): return (x<=y).numpy().astype(bool)
def Greater(x, y): return (x>y).numpy().astype(bool)
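
The new ONNX handlers map Sin/Cos/Tan directly onto the new Tensor.sin/cos/tan methods (see the last hunk), while Cosh, Sinh and Tanh fall back on the exponential identities cosh(x) = (e^x + e^-x)/2, sinh(x) = (e^x - e^-x)/2 and tanh(x) = sinh(x)/cosh(x). A quick NumPy check of those identities (illustrative only, not part of the diff):

import numpy as np
x = np.linspace(-2, 2, 9)
# the identities used by the Cosh/Sinh/Tanh handlers above
assert np.allclose((np.e ** x + np.e ** -x) / 2, np.cosh(x))
assert np.allclose((np.e ** x - np.e ** -x) / 2, np.sinh(x))
assert np.allclose(np.sinh(x) / np.cosh(x), np.tanh(x))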


@@ -87,11 +87,11 @@ backend_test.exclude('test_asin_*')
backend_test.exclude('test_asinh_*')
backend_test.exclude('test_atan_*')
backend_test.exclude('test_atanh_*')
backend_test.exclude('test_cos_*')
backend_test.exclude('test_cosh_*')
backend_test.exclude('test_sin_*')
backend_test.exclude('test_sinh_*')
backend_test.exclude('test_tan_*')
# backend_test.include('test_cos_*')
# backend_test.include('test_cosh_*')
# backend_test.exclude('test_sin_*')
# backend_test.include('test_sinh_*')
# backend_test.include('test_tanh_*')
# no boolean ops (2d, 3d, 4d)
backend_test.exclude('test_and*')


@@ -131,6 +131,14 @@ class TestOps(unittest.TestCase):
    helper_test_op([(45,65)], lambda x: 2.0**x, lambda x: 2.0**x)
  def test_sqrt(self):
    helper_test_op([(45,65)], lambda x: x.sqrt(), Tensor.sqrt, a=0)
  def test_sin(self):
    helper_test_op([(45,65)], lambda x: x.sin(), Tensor.sin, a=0)
  def test_cos(self):
    helper_test_op([(45,65)], lambda x: x.cos(), Tensor.cos, a=0)
  def test_tan(self):
    helper_test_op([(45,65)], lambda x: x.tan(), Tensor.tan, a=0)
  def test_relu(self):
    helper_test_op([(64,64)], lambda x: x.relu(), Tensor.relu)
  def test_relu_exact(self):
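
helper_test_op (defined earlier in this test file) runs the op on random tensors of the listed shape and compares tinygrad against the torch reference (values and, where applicable, gradients). As a rough standalone analogue of the new sin test (a sketch, not the actual helper):

import numpy as np, torch
from tinygrad.tensor import Tensor

data = np.random.uniform(0, 3, size=(45, 65)).astype(np.float32)
np.testing.assert_allclose(Tensor(data).sin().numpy(), torch.tensor(data).sin().numpy(), atol=1e-4, rtol=1e-4)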


@@ -50,6 +50,7 @@ def to_image_idx(base_shape:Tuple[int, ...], idxy:Node, valid:Node, validhacks=F
code_for_op: Final[Dict[Op, Callable]] = {
UnaryOps.EXP: lambda x: f"native_exp({x})" if NATIVE_EXPLOG else f"exp({x})",
UnaryOps.LOG: lambda x: f"native_log({x})" if NATIVE_EXPLOG else f"log({x})",
UnaryOps.SIN: lambda x: f"sin({x})",
BinaryOps.ADD: lambda a,b: f"({a}+{b})", BinaryOps.SUB: lambda a,b: f"({a}-{b})",
BinaryOps.MUL: lambda a,b: f"({a}*{b})", BinaryOps.DIV: lambda a,b: f"({a}/{b})",
BinaryOps.POW: lambda a,b: f"pow({a},{b})", BinaryOps.MAX: lambda a,b: f"max({a},{b})",
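
For the C-style GPU backends, UnaryOps.SIN simply renders to a sin(...) call in the generated kernel source, exactly like EXP and LOG. A trimmed-down stand-in for the table above, just to show what string the renderer produces (the names here are a local mock, not imports from the repo):

from enum import Enum, auto
from typing import Callable, Dict

class MockUnaryOps(Enum): EXP = auto(); LOG = auto(); SIN = auto()  # stand-in for tinygrad's UnaryOps

mock_code_for_op: Dict[MockUnaryOps, Callable] = {
  MockUnaryOps.SIN: lambda x: f"sin({x})",
}
print(mock_code_for_op[MockUnaryOps.SIN]("val1_0"))  # -> sin(val1_0)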


@@ -22,6 +22,7 @@ render_llvm = {
code_for_op: Final[Dict[Op, Callable]] = {
UnaryOps.EXP: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.exp', [ir.FloatType()]), [x], fastmath=('fast',)),
UnaryOps.LOG: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.log', [ir.FloatType()]), [x], fastmath=('fast',)),
UnaryOps.SIN: lambda builder,x: builder.call(builder._block.module.declare_intrinsic('llvm.sin', [ir.FloatType()]), [x], fastmath=('fast',)),
BinaryOps.ADD: lambda builder,x,y: builder.fadd(x,y, flags=('fast',)),
BinaryOps.SUB: lambda builder,x,y: builder.fsub(x,y, flags=('fast',)),
BinaryOps.MUL: lambda builder,x,y: builder.fmul(x,y, flags=('fast',)),
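
On the LLVM backend the op is lowered to the llvm.sin intrinsic via llvmlite, mirroring the existing exp/log entries. A minimal, self-contained llvmlite sketch of declaring and calling that intrinsic (a generic example, not the repo's code generator):

import llvmlite.ir as ir

module = ir.Module(name="sin_example")
fn = ir.Function(module, ir.FunctionType(ir.FloatType(), [ir.FloatType()]), name="call_sin")
builder = ir.IRBuilder(fn.append_basic_block("entry"))
sin_intr = module.declare_intrinsic('llvm.sin', [ir.FloatType()])  # declares llvm.sin.f32
builder.ret(builder.call(sin_intr, [fn.args[0]], fastmath=('fast',)))
print(module)  # textual LLVM IR containing a call to @llvm.sin.f32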


@@ -3,6 +3,7 @@ from tinygrad.helpers import argsort, ShapeType
from tinygrad.ops import UnaryOps, BinaryOps, ReduceOps, MovementOps
from tinygrad.tensor import Function
from tinygrad.lazy import LazyBuffer
import math
class Contiguous(Function):
  def forward(self, x): return x.contiguous()
@@ -17,6 +18,12 @@ class Cast(Function):
# ************* unary ops *************
class Sin(Function):
  def forward(self, x: LazyBuffer) -> LazyBuffer:
    self.x = x
    return x.unary_op(UnaryOps.SIN)
  def backward(self, grad: LazyBuffer) -> LazyBuffer:
    return self.x.const_like(math.pi / 2).binary_op(BinaryOps.SUB, self.x).unary_op(UnaryOps.SIN).binary_op(BinaryOps.MUL, grad)
# NOTE: maximum(x, 0) behaves differently where x=0
class Relu(Function):
  def forward(self, x:LazyBuffer) -> LazyBuffer:
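
The backward pass relies on d/dx sin(x) = cos(x); since SIN is the only trig primitive at the LazyBuffer level, cos(x) is expressed as sin(pi/2 - x), so backward returns sin(pi/2 - x) * grad. A quick numeric gradient check one could run after this change (a sketch, assuming the default CPU backend):

import numpy as np
from tinygrad.tensor import Tensor

x = np.random.uniform(-3, 3, size=(8,)).astype(np.float32)
t = Tensor(x, requires_grad=True)
t.sin().sum().backward()
# gradient of sum(sin(x)) with respect to x is cos(x)
np.testing.assert_allclose(t.grad.numpy(), np.cos(x), atol=1e-5, rtol=1e-5)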


@@ -8,7 +8,7 @@ from tinygrad.runtime.lib import RawBuffer, RawConst
# these are the llops your accelerator must implement, along with toCpu
# the Enum class doesn't work with mypy, this is static. sorry it's ugly
class UnaryOps(Enum): NOOP = auto(); EXP = auto(); LOG = auto(); CAST = auto() # noqa: E702
class UnaryOps(Enum): NOOP = auto(); EXP = auto(); LOG = auto(); CAST = auto(); SIN = auto() # noqa: E702
class BinaryOps(Enum): ADD = auto(); SUB = auto(); MUL = auto(); DIV = auto(); POW = auto(); CMPEQ = auto(); MAX = auto() # noqa: E702
class ReduceOps(Enum): SUM = auto(); MAX = auto() # noqa: E702
class FusedOps(Enum): MULACC = auto() # noqa: E702


@@ -27,7 +27,7 @@ def einsum_mulacc(einsum, get_strides, expand):
return mulacc
numpy_fxn_for_op: Dict[Op, Callable] = {**base_fxn_for_op, **{
UnaryOps.NOOP: lambda x: np.require(x, requirements='C'), UnaryOps.EXP: np.exp, UnaryOps.LOG: np.log, UnaryOps.CAST: lambda x,y: x.astype(y.np),
UnaryOps.NOOP: lambda x: np.require(x, requirements='C'), UnaryOps.EXP: np.exp, UnaryOps.LOG: np.log, UnaryOps.CAST: lambda x,y: x.astype(y.np), UnaryOps.SIN: np.sin,
BinaryOps.MAX: np.maximum, BinaryOps.CMPEQ: lambda x,y: (x==y).astype(np.float32),
MovementOps.PERMUTE: lambda x, order: x.transpose(order), MovementOps.PAD: np.pad, MovementOps.EXPAND: np.broadcast_to,
MovementOps.STRIDE: lambda x, arg: x[tuple(slice(None, None, i) for i in arg)],


@@ -9,7 +9,7 @@ device = torch.device("cuda:0" if torch.cuda.is_available() else ("mps" if geten
type_map = {torch.float16: dtypes.float16, torch.float32: dtypes.float32}
torch_fxn_for_op: Dict[Op, Callable] = {**base_fxn_for_op, **{
UnaryOps.NOOP: lambda x: x.contiguous(), UnaryOps.EXP: lambda x: x.exp(), UnaryOps.LOG: lambda x: x.log(), UnaryOps.CAST: lambda x,y: x.type(next(k for k,v in type_map.items() if v==y)),
UnaryOps.NOOP: lambda x: x.contiguous(), UnaryOps.EXP: lambda x: x.exp(), UnaryOps.LOG: lambda x: x.log(), UnaryOps.CAST: lambda x,y: x.type(next(k for k,v in type_map.items() if v==y)), UnaryOps.SIN: torch.sin,
BinaryOps.MAX: torch.maximum, BinaryOps.CMPEQ: lambda x,y: (x==y).float(),
MovementOps.PAD: lambda x, padding: torch.nn.functional.pad(x, [item for sublist in padding[::-1] for item in sublist]),
FusedOps.MULACC: einsum_mulacc(lambda s,a,b: torch.einsum(s, a.float(), b.float()).type(torch.promote_types(a.dtype, b.dtype)), lambda x: x.stride(), lambda x,s: x.expand(s)),


@@ -396,7 +396,9 @@ class Tensor:
  def log(self): return mlops.Log.apply(self)
  def exp(self): return mlops.Exp.apply(self)
  def relu(self): return mlops.Relu.apply(self)
  def sin(self): return mlops.Sin.apply(self)
  def cos(self): return ((math.pi/2)-self).sin()
  def tan(self): return self.sin() / self.cos()
  # ***** math functions (unary) *****
  def __neg__(self): return 0.0-self
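
At the Tensor level only sin is a true primitive; cos and tan are derived from it via cos(x) = sin(pi/2 - x) and tan(x) = sin(x)/cos(x). Example usage of the new methods checked against NumPy references (illustrative sketch):

import numpy as np
from tinygrad.tensor import Tensor

x = np.array([0.0, 0.5, 1.0, 2.0], dtype=np.float32)
t = Tensor(x)
np.testing.assert_allclose(t.sin().numpy(), np.sin(x), atol=1e-5)
np.testing.assert_allclose(t.cos().numpy(), np.cos(x), atol=1e-5)
np.testing.assert_allclose(t.tan().numpy(), np.tan(x), atol=1e-4)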