Dataset schema (column name, type, and the viewer's min–max length/value stats):

entry_point                   string   length 1 – 65
original_triton_python_code   string   length 208 – 619k
optimised_triton_code         string   length 1.15k – 275k
repo_name                     string   length 7 – 115
module_name                   string   length 1 – 65
synthetic                     bool     1 class
uuid                          int64    0 – 18.5k
licenses                      list     length 1 – 6
stars                         int64    0 – 19.8k
sha                           string   length 40 – 40
repo_link                     string   length 72 – 180
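These rows pair an eager PyTorch module (original_triton_python_code) with its Inductor-generated Triton equivalent (optimised_triton_code). A hedged sketch of how a row might be consumed; the dataset path below is a placeholder, not the real identifier:

from datasets import load_dataset

ds = load_dataset("<org>/<pytorch-triton-pairs>", split="train")  # placeholder path
row = ds[0]
print(row["entry_point"], row["repo_name"], row["licenses"])
print(row["original_triton_python_code"][:200])   # eager PyTorch module
print(row["optimised_triton_code"][:200])         # compiled Triton version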
entry_point: CircleLoss
original_triton_python_code:

import torch
from typing import *
import torch.nn as nn
import torch.nn.functional as F


class CircleLoss(nn.Module):

    def __init__(self, gamma, m):
        super().__init__()
        self.gamma = gamma
        self.m = m

    def forward(self, s_p, s_n):
        alpha_p = torch.clamp_min(1 + self.m - s_p, 0)
        alpha_n = torch.clamp_min(self.m + s_n, 0)
        delta_p = 1 - self.m
        delta_n = self.m
        logit_p = -self.gamma * alpha_p * (s_p - delta_p)
        logit_n = self.gamma * alpha_n * (s_n - delta_n)
        return F.softplus(torch.logsumexp(logit_p, dim=0) +
            torch.logsumexp(logit_n, dim=0))


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'gamma': 4, 'm': 4}]
optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from typing import *
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_clamp_min_logsumexp_mul_rsub_softplus_sub_0(
        in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp10 = tl.load(in_ptr0 + (64 + x0), xmask)
    tmp17 = tl.load(in_ptr0 + (128 + x0), xmask)
    tmp24 = tl.load(in_ptr0 + (192 + x0), xmask)
    tmp46 = tl.load(in_ptr1 + x0, xmask)
    tmp53 = tl.load(in_ptr1 + (64 + x0), xmask)
    tmp60 = tl.load(in_ptr1 + (128 + x0), xmask)
    tmp67 = tl.load(in_ptr1 + (192 + x0), xmask)
    tmp1 = 5.0
    tmp2 = tmp1 - tmp0
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = -4.0
    tmp6 = tmp4 * tmp5
    tmp7 = -3.0
    tmp8 = tmp0 - tmp7
    tmp9 = tmp6 * tmp8
    tmp11 = tmp1 - tmp10
    tmp12 = triton_helpers.maximum(tmp11, tmp3)
    tmp13 = tmp12 * tmp5
    tmp14 = tmp10 - tmp7
    tmp15 = tmp13 * tmp14
    tmp16 = triton_helpers.maximum(tmp9, tmp15)
    tmp18 = tmp1 - tmp17
    tmp19 = triton_helpers.maximum(tmp18, tmp3)
    tmp20 = tmp19 * tmp5
    tmp21 = tmp17 - tmp7
    tmp22 = tmp20 * tmp21
    tmp23 = triton_helpers.maximum(tmp16, tmp22)
    tmp25 = tmp1 - tmp24
    tmp26 = triton_helpers.maximum(tmp25, tmp3)
    tmp27 = tmp26 * tmp5
    tmp28 = tmp24 - tmp7
    tmp29 = tmp27 * tmp28
    tmp30 = triton_helpers.maximum(tmp23, tmp29)
    tmp31 = tl_math.abs(tmp30)
    tmp32 = float('inf')
    tmp33 = tmp31 == tmp32
    tmp34 = tl.where(tmp33, tmp3, tmp30)
    tmp35 = tmp9 - tmp34
    tmp36 = tl_math.exp(tmp35)
    tmp37 = tmp15 - tmp34
    tmp38 = tl_math.exp(tmp37)
    tmp39 = tmp36 + tmp38
    tmp40 = tmp22 - tmp34
    tmp41 = tl_math.exp(tmp40)
    tmp42 = tmp39 + tmp41
    tmp43 = tmp29 - tmp34
    tmp44 = tl_math.exp(tmp43)
    tmp45 = tmp42 + tmp44
    tmp47 = 4.0
    tmp48 = tmp46 + tmp47
    tmp49 = triton_helpers.maximum(tmp48, tmp3)
    tmp50 = tmp49 * tmp47
    tmp51 = tmp46 - tmp47
    tmp52 = tmp50 * tmp51
    tmp54 = tmp53 + tmp47
    tmp55 = triton_helpers.maximum(tmp54, tmp3)
    tmp56 = tmp55 * tmp47
    tmp57 = tmp53 - tmp47
    tmp58 = tmp56 * tmp57
    tmp59 = triton_helpers.maximum(tmp52, tmp58)
    tmp61 = tmp60 + tmp47
    tmp62 = triton_helpers.maximum(tmp61, tmp3)
    tmp63 = tmp62 * tmp47
    tmp64 = tmp60 - tmp47
    tmp65 = tmp63 * tmp64
    tmp66 = triton_helpers.maximum(tmp59, tmp65)
    tmp68 = tmp67 + tmp47
    tmp69 = triton_helpers.maximum(tmp68, tmp3)
    tmp70 = tmp69 * tmp47
    tmp71 = tmp67 - tmp47
    tmp72 = tmp70 * tmp71
    tmp73 = triton_helpers.maximum(tmp66, tmp72)
    tmp74 = tl_math.abs(tmp73)
    tmp75 = tmp74 == tmp32
    tmp76 = tl.where(tmp75, tmp3, tmp73)
    tmp77 = tmp52 - tmp76
    tmp78 = tl_math.exp(tmp77)
    tmp79 = tmp58 - tmp76
    tmp80 = tl_math.exp(tmp79)
    tmp81 = tmp78 + tmp80
    tmp82 = tmp65 - tmp76
    tmp83 = tl_math.exp(tmp82)
    tmp84 = tmp81 + tmp83
    tmp85 = tmp72 - tmp76
    tmp86 = tl_math.exp(tmp85)
    tmp87 = tmp84 + tmp86
    tmp88 = tl_math.log(tmp45)
    tmp89 = tmp88 + tmp34
    tmp90 = tl_math.log(tmp87)
    tmp91 = tmp90 + tmp76
    tmp92 = tmp89 + tmp91
    tmp93 = 20.0
    tmp94 = tmp92 > tmp93
    tmp95 = tl_math.exp(tmp92)
    tmp96 = libdevice.log1p(tmp95)
    tmp97 = tl.where(tmp94, tmp92, tmp96)
    tl.store(in_out_ptr0 + x0, tmp97, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        buf5 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_add_clamp_min_logsumexp_mul_rsub_softplus_sub_0[grid(64)](
            buf5, arg0_1, arg1_1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
    return buf5,


class CircleLossNew(nn.Module):

    def __init__(self, gamma, m):
        super().__init__()
        self.gamma = gamma
        self.m = m

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
repo_name: IntelLabs/MICSAS
module_name: CircleLoss
synthetic: false
uuid: 17,429
licenses: [ "MIT", "BSD-3-Clause" ]
stars: 7
sha: 4124991a683cc10004e403f3f3eb442f58616519
repo_link: https://github.com/IntelLabs/MICSAS/tree/4124991a683cc10004e403f3f3eb442f58616519
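Note on this pair: Inductor folds the constructor arguments gamma=4, m=4 into kernel literals (tmp1 = 5.0 is 1 + m, -4.0 is -gamma, -3.0 is delta_p = 1 - m, and 20.0 is the softplus overflow threshold), and the two logsumexp reductions over dim=0 are unrolled across four 64-element slices. A minimal parity sketch, assuming a CUDA device and the two classes from this row's code columns:

import torch

ref = CircleLoss(gamma=4, m=4)
opt = CircleLossNew(gamma=4, m=4)
s_p = torch.rand(4, 4, 4, 4, device='cuda')
s_n = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(ref(s_p, s_n), opt(s_p, s_n))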
entry_point: Hswish
original_triton_python_code:

import torch
import torch.nn as nn
import torch.utils.data


class Hswish(nn.Module):

    def __init__(self, inplace=True):
        super(Hswish, self).__init__()
        self.relu = nn.ReLU6(inplace=inplace)

    def forward(self, x):
        return self.relu(x + 3) / 6


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 3.0
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = triton_helpers.maximum(tmp2, tmp3)
    tmp5 = 6.0
    tmp6 = triton_helpers.minimum(tmp4, tmp5)
    tmp7 = 0.16666666666666666
    tmp8 = tmp6 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class HswishNew(nn.Module):

    def __init__(self, inplace=True):
        super(HswishNew, self).__init__()
        self.relu = nn.ReLU6(inplace=inplace)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
repo_name: COEN-390/YOLOv5-Lite
module_name: Hswish
synthetic: false
uuid: 11,262
licenses: [ "MIT" ]
stars: 0
sha: 06a53f5d001c5d37729f55f47cbd46cc8eb63f84
repo_link: https://github.com/COEN-390/YOLOv5-Lite/tree/06a53f5d001c5d37729f55f47cbd46cc8eb63f84
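Note: nn.ReLU6 lowers to a maximum/minimum clamp pair, and the division by 6 is folded into a multiply by the reciprocal 0.16666666666666666. A hedged eager-vs-compiled check (CUDA device assumed, classes from this row):

import torch
import torch.nn as nn

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = nn.ReLU6()(x + 3) / 6  # the eager Hswish.forward computation
torch.testing.assert_close(ref, HswishNew()(x))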
entry_point: Scale1Minus1
original_triton_python_code:

import torch


class Scale1Minus1(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, x):
        x = x / 254.0 * 2 - 1
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.003937007874015748
    tmp2 = tmp0 * tmp1
    tmp3 = 2.0
    tmp4 = tmp2 * tmp3
    tmp5 = 1.0
    tmp6 = tmp4 - tmp5
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_mul_sub_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class Scale1Minus1New(torch.nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
repo_name: Filco306/WASP-GANs
module_name: Scale1Minus1
synthetic: false
uuid: 470
licenses: [ "Apache-2.0" ]
stars: 0
sha: e50cf096a5e3eb26d33a3cbf164d728b9789e41b
repo_link: https://github.com/Filco306/WASP-GANs/tree/e50cf096a5e3eb26d33a3cbf164d728b9789e41b
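Note: x / 254.0 * 2 - 1 compiles to a multiply-multiply-subtract chain; the literal 0.003937007874015748 is exactly 1 / 254.0 in float64. A hedged sketch (CUDA assumed, Scale1Minus1New from this row):

import torch

assert 1 / 254.0 == 0.003937007874015748  # the constant baked into the kernel
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(x / 254.0 * 2 - 1, Scale1Minus1New()(x))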
entry_point: ExtractTensorPatches
original_triton_python_code:

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple
from typing import Union
from typing import Optional
from torch.nn.modules.utils import _pair


def _extract_tensor_patchesnd(input: 'torch.Tensor', window_sizes:
        'Tuple[int, ...]', strides: 'Tuple[int, ...]') -> torch.Tensor:
    batch_size, num_channels = input.size()[:2]
    dims = range(2, input.dim())
    for dim, patch_size, stride in zip(dims, window_sizes, strides):
        input = input.unfold(dim, patch_size, stride)
    input = input.permute(0, *dims, 1, *[(dim + len(dims)) for dim in dims]
        ).contiguous()
    return input.view(batch_size, -1, num_channels, *window_sizes)


def extract_tensor_patches(input: 'torch.Tensor', window_size:
        'Union[int, Tuple[int, int]]', stride: 'Union[int, Tuple[int, int]]'=1,
        padding: 'Union[int, Tuple[int, int]]'=0) -> torch.Tensor:
    """Function that extracts patches from tensors and stacks them.

    See :class:`~kornia.contrib.ExtractTensorPatches` for details.
    """
    if not torch.is_tensor(input):
        raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
            type(input)))
    if not len(input.shape) == 4:
        raise ValueError('Invalid input shape, we expect BxCxHxW. Got: {}'.
            format(input.shape))
    if padding:
        pad_vert, pad_horz = _pair(padding)
        input = F.pad(input, [pad_horz, pad_horz, pad_vert, pad_vert])
    return _extract_tensor_patchesnd(input, _pair(window_size), _pair(stride))


class ExtractTensorPatches(nn.Module):
    """Module that extracts patches from tensors and stacks them.

    In the simplest case, the output value of the operator with input size
    :math:`(B, C, H, W)` is :math:`(B, N, C, H_{out}, W_{out})`, where

    - :math:`B` is the batch size.
    - :math:`N` denotes the total number of extracted patches, stacked in
      left-right and top-bottom order.
    - :math:`C` denotes the number of input channels.
    - :math:`H`, :math:`W` the input height and width of the input in pixels.
    - :math:`H_{out}`, :math:`W_{out}` denote the patch size defined in the
      function signature.

    * :attr:`window_size` is the size of the sliding window and controls the
      shape of the output tensor and defines the shape of the output patch.
    * :attr:`stride` controls the stride to apply to the sliding window and
      regulates the overlapping between the extracted patches.
    * :attr:`padding` controls the amount of implicit zero-padding on both
      sides at each dimension.

    The parameters :attr:`window_size`, :attr:`stride` and :attr:`padding` can
    be either:

    - a single ``int`` -- in which case the same value is used for the
      height and width dimension.
    - a ``tuple`` of two ints -- in which case, the first `int` is used for
      the height dimension, and the second `int` for the width dimension.

    Arguments:
        window_size (Union[int, Tuple[int, int]]): the size of the sliding
            window and the output patch size.
        stride (Optional[Union[int, Tuple[int, int]]]): stride of the sliding
            window. Default is 1.
        padding (Optional[Union[int, Tuple[int, int]]]): Zero-padding added to
            both sides of the input. Default is 0.

    Shape:
        - Input: :math:`(B, C, H, W)`
        - Output: :math:`(B, N, C, H_{out}, W_{out})`

    Returns:
        torch.Tensor: the tensor with the extracted patches.

    Examples:
        >>> input = torch.arange(9.).view(1, 1, 3, 3)
        >>> patches = extract_tensor_patches(input, (2, 3))
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> patches[:, -1]
        tensor([[[[3., 4., 5.],
                  [6., 7., 8.]]]])
    """

    def __init__(self, window_size: 'Union[int, Tuple[int, int]]', stride:
            'Optional[Union[int, Tuple[int, int]]]'=1, padding:
            'Optional[Union[int, Tuple[int, int]]]'=0) -> None:
        super(ExtractTensorPatches, self).__init__()
        self.window_size: 'Tuple[int, int]' = _pair(window_size)
        self.stride: 'Tuple[int, int]' = _pair(stride)
        self.padding: 'Tuple[int, int]' = _pair(padding)

    def forward(self, input: 'torch.Tensor') -> torch.Tensor:
        return extract_tensor_patches(input, self.window_size, stride=self.
            stride, padding=self.padding)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'window_size': 4}]
optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple
from typing import Union
from typing import Optional
from torch.nn.modules.utils import _pair

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_constant_pad_nd_view_0(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tl.store(in_out_ptr0 + x0, tmp0, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 1, 4, 4, 4), (64, 16, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_constant_pad_nd_view_0[grid(256)](buf1, arg0_1,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf1,


def _extract_tensor_patchesnd(input: 'torch.Tensor', window_sizes:
        'Tuple[int, ...]', strides: 'Tuple[int, ...]') -> torch.Tensor:
    batch_size, num_channels = input.size()[:2]
    dims = range(2, input.dim())
    for dim, patch_size, stride in zip(dims, window_sizes, strides):
        input = input.unfold(dim, patch_size, stride)
    input = input.permute(0, *dims, 1, *[(dim + len(dims)) for dim in dims]
        ).contiguous()
    return input.view(batch_size, -1, num_channels, *window_sizes)


def extract_tensor_patches(input: 'torch.Tensor', window_size:
        'Union[int, Tuple[int, int]]', stride: 'Union[int, Tuple[int, int]]'=1,
        padding: 'Union[int, Tuple[int, int]]'=0) -> torch.Tensor:
    """Function that extracts patches from tensors and stacks them.

    See :class:`~kornia.contrib.ExtractTensorPatches` for details.
    """
    if not torch.is_tensor(input):
        raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
            type(input)))
    if not len(input.shape) == 4:
        raise ValueError('Invalid input shape, we expect BxCxHxW. Got: {}'.
            format(input.shape))
    if padding:
        pad_vert, pad_horz = _pair(padding)
        input = F.pad(input, [pad_horz, pad_horz, pad_vert, pad_vert])
    return _extract_tensor_patchesnd(input, _pair(window_size), _pair(stride))


class ExtractTensorPatchesNew(nn.Module):
    """Module that extracts patches from tensors and stacks them.

    In the simplest case, the output value of the operator with input size
    :math:`(B, C, H, W)` is :math:`(B, N, C, H_{out}, W_{out})`, where

    - :math:`B` is the batch size.
    - :math:`N` denotes the total number of extracted patches, stacked in
      left-right and top-bottom order.
    - :math:`C` denotes the number of input channels.
    - :math:`H`, :math:`W` the input height and width of the input in pixels.
    - :math:`H_{out}`, :math:`W_{out}` denote the patch size defined in the
      function signature.

    * :attr:`window_size` is the size of the sliding window and controls the
      shape of the output tensor and defines the shape of the output patch.
    * :attr:`stride` controls the stride to apply to the sliding window and
      regulates the overlapping between the extracted patches.
    * :attr:`padding` controls the amount of implicit zero-padding on both
      sides at each dimension.

    The parameters :attr:`window_size`, :attr:`stride` and :attr:`padding` can
    be either:

    - a single ``int`` -- in which case the same value is used for the
      height and width dimension.
    - a ``tuple`` of two ints -- in which case, the first `int` is used for
      the height dimension, and the second `int` for the width dimension.

    Arguments:
        window_size (Union[int, Tuple[int, int]]): the size of the sliding
            window and the output patch size.
        stride (Optional[Union[int, Tuple[int, int]]]): stride of the sliding
            window. Default is 1.
        padding (Optional[Union[int, Tuple[int, int]]]): Zero-padding added to
            both sides of the input. Default is 0.

    Shape:
        - Input: :math:`(B, C, H, W)`
        - Output: :math:`(B, N, C, H_{out}, W_{out})`

    Returns:
        torch.Tensor: the tensor with the extracted patches.

    Examples:
        >>> input = torch.arange(9.).view(1, 1, 3, 3)
        >>> patches = extract_tensor_patches(input, (2, 3))
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> patches[:, -1]
        tensor([[[[3., 4., 5.],
                  [6., 7., 8.]]]])
    """

    def __init__(self, window_size: 'Union[int, Tuple[int, int]]', stride:
            'Optional[Union[int, Tuple[int, int]]]'=1, padding:
            'Optional[Union[int, Tuple[int, int]]]'=0) -> None:
        super(ExtractTensorPatchesNew, self).__init__()
        self.window_size: 'Tuple[int, int]' = _pair(window_size)
        self.stride: 'Tuple[int, int]' = _pair(stride)
        self.padding: 'Tuple[int, int]' = _pair(padding)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
repo_name: NickleDave/kornia
module_name: ExtractTensorPatches
synthetic: false
uuid: 2,681
licenses: [ "ECL-2.0", "Apache-2.0" ]
stars: 0
sha: 5392651d0bc268da577fa0a49aa50f957289c7dd
repo_link: https://github.com/NickleDave/kornia/tree/5392651d0bc268da577fa0a49aa50f957289c7dd
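Note: with window_size=4, stride=1, padding=0 on a 4x4 input there is exactly one patch per image, so the compiled kernel degenerates to a copy plus a stride reinterpretation to (B, N, C, H_out, W_out) = (4, 1, 4, 4, 4). A hedged sketch (CUDA assumed):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
patches = ExtractTensorPatchesNew(window_size=4)(x)
assert patches.shape == (4, 1, 4, 4, 4)
torch.testing.assert_close(patches[:, 0], x)  # the single patch is the image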
entry_point: ConvPlus
original_triton_python_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/g6/cg6ggbgmjcrpyy6skkvmdrhjstximmnocghnc7q5sven6g5bhojo.py
# Topologically Sorted Source Nodes: [conv2d, conv2d_1, add], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
#   add => add
#   conv2d => convolution
#   conv2d_1 => convolution_1
# Graph fragment:
#   %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 0], [1, 1], False, [0, 0], 1), kwargs = {})
#   %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %convolution_1), kwargs = {})
triton_poi_fused_add_convolution_0 = async_compile.triton('triton_poi_fused_add_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = (xindex // 16) % 4
    tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
    tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x3), xmask)
    tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tl.store(in_out_ptr0 + (x3), tmp6, xmask)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 1), (12, 3, 1, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1),
            padding=(1, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1, 1),
            padding=(0, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf0
        del buf0  # reuse
        # Topologically Sorted Source Nodes: [conv2d, conv2d_1, add], Original ATen: [aten.convolution, aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_convolution_0.run(buf2, primals_2, buf1,
            primals_5, 256, grid=grid(256), stream=stream0)
        del buf1
        del primals_2
        del primals_5
    return (buf2, primals_1, primals_3, primals_4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4, 3, 1), (12, 3, 1, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4, 1, 3), (12, 3, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1,
        in_ptr2, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x3, xmask)
    tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tl.store(in_out_ptr0 + x3, tmp6, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 3, 1), (12, 3, 1, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 1, 3), (12, 3, 3, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
        buf1 = extern_kernels.convolution(primals_3, primals_4, stride=(1,
            1), padding=(0, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
        buf2 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_convolution_0[grid(256)](buf2, primals_2,
            buf1, primals_5, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del primals_2
        del primals_5
    return buf2, primals_1, primals_3, primals_4


class ConvPlusNew(nn.Module):

    def __init__(self, c1, c2, k=3, s=1, g=1, bias=True):
        super(ConvPlusNew, self).__init__()
        self.cv1 = nn.Conv2d(c1, c2, (k, 1), s, (k // 2, 0), groups=g,
            bias=bias)
        self.cv2 = nn.Conv2d(c1, c2, (1, k), s, (0, k // 2), groups=g,
            bias=bias)

    def forward(self, input_0):
        primals_1 = self.cv1.weight
        primals_2 = self.cv1.bias
        primals_4 = self.cv2.weight
        primals_5 = self.cv2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
repo_name: jiangbestone/detect_rcnn
module_name: ConvPlus
synthetic: false
uuid: 6,938
licenses: [ "MIT" ]
stars: 1
sha: 41c4f4d3f8409cc146314c41a3d02ceafa9a7477
repo_link: https://github.com/jiangbestone/detect_rcnn/tree/41c4f4d3f8409cc146314c41a3d02ceafa9a7477
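Note: the two asymmetric convolutions (k x 1 and 1 x k) stay as extern ATen convolution calls; only the two bias adds and the final sum are fused into one Triton kernel that overwrites buf0 in place. A hedged parity sketch (CUDA assumed, ConvPlusNew from this row):

import torch

m = ConvPlusNew(c1=4, c2=4, k=3).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
ref = m.cv1(x) + m.cv2(x)  # eager kx1 + 1xk convolution sum
torch.testing.assert_close(ref, m(x))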
entry_point: MegatronGelu
original_triton_python_code:

import torch
import torch.nn
import torch.onnx


class MegatronGelu(torch.nn.Module):

    def forward(self, x):
        return x * 0.5 * (torch.erf(x / 1.41421) + 1.0)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
optimised_triton_code:

import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn
import torch.onnx

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_erf_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.5
    tmp2 = tmp0 * tmp1
    tmp3 = 0.7071085623775818
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.erf(tmp4)
    tmp6 = 1.0
    tmp7 = tmp5 + tmp6
    tmp8 = tmp2 * tmp7
    tl.store(out_ptr0 + x0, tmp8, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_erf_mul_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class MegatronGeluNew(torch.nn.Module):

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
repo_name: RyanUnderhill/onnxruntime
module_name: MegatronGelu
synthetic: false
uuid: 11,816
licenses: [ "MIT" ]
stars: 0
sha: 6df4e293ffbb47d739d2dedfbb87fa6234b8c37c
repo_link: https://github.com/RyanUnderhill/onnxruntime/tree/6df4e293ffbb47d739d2dedfbb87fa6234b8c37c
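Note: the division x / 1.41421 is folded into a multiply by its reciprocal, 0.7071085623775818. A hedged eager-vs-compiled check (CUDA assumed, MegatronGeluNew from this row):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
ref = x * 0.5 * (torch.erf(x / 1.41421) + 1.0)
torch.testing.assert_close(ref, MegatronGeluNew()(x))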
entry_point: AttnGCNLayer
original_triton_python_code:

import math
import torch
import torch.nn as nn
import torch.utils.data


class GCNLayer(nn.Module):

    def __init__(self, embed_size, dropout=0.0):
        super().__init__()
        self.embed_size = embed_size
        self.ctx_layer = nn.Linear(self.embed_size, self.embed_size,
            bias=False)
        self.layernorm = nn.LayerNorm(embed_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, node_fts, rel_edges):
        """Args:
            node_fts: (batch_size, num_nodes, embed_size)
            rel_edges: (batch_size, num_nodes, num_nodes)
        """
        ctx_embeds = self.ctx_layer(torch.bmm(rel_edges, node_fts))
        node_embeds = node_fts + self.dropout(ctx_embeds)
        node_embeds = self.layernorm(node_embeds)
        return node_embeds


class AttnGCNLayer(GCNLayer):

    def __init__(self, embed_size, d_ff, dropout=0.0):
        super().__init__(embed_size, dropout=dropout)
        self.edge_attn_query = nn.Linear(embed_size, d_ff)
        self.edge_attn_key = nn.Linear(embed_size, d_ff)
        self.attn_denominator = math.sqrt(d_ff)

    def forward(self, node_fts, rel_edges):
        """Args:
            node_fts: (batch_size, num_nodes, embed_size)
            rel_edges: (batch_size, num_nodes, num_nodes)
        """
        attn_scores = torch.einsum('bod,bid->boi', self.edge_attn_query(
            node_fts), self.edge_attn_key(node_fts)) / self.attn_denominator
        attn_scores = attn_scores.masked_fill(rel_edges == 0, -1e+18)
        attn_scores = torch.softmax(attn_scores, dim=2)
        attn_scores = attn_scores.masked_fill(rel_edges == 0, 0)
        ctx_embeds = self.ctx_layer(torch.bmm(attn_scores, node_fts))
        node_embeds = node_fts + self.dropout(ctx_embeds)
        node_embeds = self.layernorm(node_embeds)
        return node_embeds


def get_inputs():
    return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'embed_size': 4, 'd_ff': 4}]
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last'
        ).to(tl.int1)
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
        ).to(tl.int1)
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
        ).to(tl.int1)
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
        ).to(tl.int1)
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = -9.999999843067494e+17
    tmp5 = tl.where(tmp0, tmp4, tmp3)
    tmp8 = tmp7 * tmp2
    tmp9 = tl.where(tmp6, tmp4, tmp8)
    tmp10 = triton_helpers.maximum(tmp5, tmp9)
    tmp13 = tmp12 * tmp2
    tmp14 = tl.where(tmp11, tmp4, tmp13)
    tmp15 = triton_helpers.maximum(tmp10, tmp14)
    tmp18 = tmp17 * tmp2
    tmp19 = tl.where(tmp16, tmp4, tmp18)
    tmp20 = triton_helpers.maximum(tmp15, tmp19)
    tmp21 = tmp5 - tmp20
    tmp22 = tl_math.exp(tmp21)
    tmp23 = tmp9 - tmp20
    tmp24 = tl_math.exp(tmp23)
    tmp25 = tmp22 + tmp24
    tmp26 = tmp14 - tmp20
    tmp27 = tl_math.exp(tmp26)
    tmp28 = tmp25 + tmp27
    tmp29 = tmp19 - tmp20
    tmp30 = tl_math.exp(tmp29)
    tmp31 = tmp28 + tmp30
    tl.store(out_ptr0 + x0, tmp20, xmask)
    tl.store(out_ptr1 + x0, tmp31, xmask)


@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_out_ptr0, in_ptr0,
        in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
    tmp1 = tl.load(in_out_ptr0 + x2, xmask)
    tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = -9.999999843067494e+17
    tmp5 = tl.where(tmp0, tmp4, tmp3)
    tmp7 = tmp5 - tmp6
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tmp8 / tmp9
    tmp11 = 0.0
    tmp12 = tl.where(tmp0, tmp11, tmp10)
    tl.store(in_out_ptr0 + x2, tmp10, xmask)
    tl.store(out_ptr0 + x2, tmp12, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 + tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 + tmp12
    tmp14 = tmp10 + tmp13
    tmp15 = 4.0
    tmp16 = tmp14 / tmp15
    tmp17 = tmp2 - tmp16
    tmp18 = tmp17 * tmp17
    tmp19 = tmp5 - tmp16
    tmp20 = tmp19 * tmp19
    tmp21 = tmp18 + tmp20
    tmp22 = tmp9 - tmp16
    tmp23 = tmp22 * tmp22
    tmp24 = tmp21 + tmp23
    tmp25 = tmp13 - tmp16
    tmp26 = tmp25 * tmp25
    tmp27 = tmp24 + tmp26
    tmp28 = tmp27 / tmp15
    tl.store(out_ptr0 + x0, tmp16, xmask)
    tl.store(out_ptr1 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
        in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 - tmp3
    tmp6 = 1e-05
    tmp7 = tmp5 + tmp6
    tmp8 = libdevice.rsqrt(tmp7)
    tmp9 = tmp4 * tmp8
    tmp11 = tmp9 * tmp10
    tmp13 = tmp11 + tmp12
    tl.store(out_ptr0 + x2, tmp13, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_7, (4, 4), (4, 1))
    assert_size_stride(primals_8, (4,), (1,))
    assert_size_stride(primals_9, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2)
        buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
        get_raw_stream(0)
        triton_poi_fused_eq_0[grid(64)](primals_6, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del primals_6
        buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused__softmax_div_masked_fill_1[grid(16)](buf3, buf2,
            buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf6 = buf2
        del buf2
        buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf3,
            buf4, buf5, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        extern_kernels.bmm(buf7, primals_3, out=buf8)
        buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
        del buf7
        extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0),
            reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf9)
        buf10 = buf5
        del buf5
        buf11 = buf4
        del buf4
        triton_poi_fused_add_native_layer_norm_3[grid(16)](primals_3, buf9,
            buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        triton_poi_fused_add_native_layer_norm_4[grid(64)](primals_3, buf9,
            buf10, buf11, primals_8, primals_9, buf12, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        del buf10
        del buf11
        del primals_9
    return (buf12, primals_3, primals_8, buf3, buf6, reinterpret_tensor(
        buf8, (16, 4), (4, 1), 0), buf9, primals_7, reinterpret_tensor(
        buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4),
        (16, 4, 1), 0))


class GCNLayer(nn.Module):

    def __init__(self, embed_size, dropout=0.0):
        super().__init__()
        self.embed_size = embed_size
        self.ctx_layer = nn.Linear(self.embed_size, self.embed_size,
            bias=False)
        self.layernorm = nn.LayerNorm(embed_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, node_fts, rel_edges):
        """Args:
            node_fts: (batch_size, num_nodes, embed_size)
            rel_edges: (batch_size, num_nodes, num_nodes)
        """
        ctx_embeds = self.ctx_layer(torch.bmm(rel_edges, node_fts))
        node_embeds = node_fts + self.dropout(ctx_embeds)
        node_embeds = self.layernorm(node_embeds)
        return node_embeds


class AttnGCNLayerNew(GCNLayer):

    def __init__(self, embed_size, d_ff, dropout=0.0):
        super().__init__(embed_size, dropout=dropout)
        self.edge_attn_query = nn.Linear(embed_size, d_ff)
        self.edge_attn_key = nn.Linear(embed_size, d_ff)
        self.attn_denominator = math.sqrt(d_ff)

    def forward(self, input_0, input_1):
        primals_1 = self.ctx_layer.weight
        primals_2 = self.layernorm.weight
        primals_5 = self.layernorm.bias
        primals_4 = self.edge_attn_query.weight
        primals_8 = self.edge_attn_query.bias
        primals_7 = self.edge_attn_key.weight
        primals_9 = self.edge_attn_key.bias
        primals_3 = input_0
        primals_6 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9])
        return output[0]
repo_name: Fenkail/hgr_v2t
module_name: AttnGCNLayer
synthetic: false
uuid: 13,696
licenses: [ "MIT" ]
stars: 190
sha: d8cc1c18cdaae54fd1878d6dc7b8e9c60d83fcbb
repo_link: https://github.com/Fenkail/hgr_v2t/tree/d8cc1c18cdaae54fd1878d6dc7b8e9c60d83fcbb
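Note: the masked softmax is split into a max/sum pass and a normalize pass, and the fill value -1e+18 appears as -9.999999843067494e+17, its float32 rounding. Be aware that AttnGCNLayerNew.forward binds self.layernorm.weight to primals_2, which call then consumes as the bias of the first addmm, so exact parity with the eager AttnGCNLayer should be verified rather than assumed. A hedged smoke test (CUDA assumed):

import torch

opt = AttnGCNLayerNew(embed_size=4, d_ff=4).cuda()
node_fts = torch.rand(4, 4, 4, device='cuda')
rel_edges = (torch.rand(4, 4, 4, device='cuda') > 0.5).float()
assert opt(node_fts, rel_edges).shape == (4, 4, 4)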
entry_point: FeedForward
original_triton_python_code:

# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_4/inductor_cache/ih/cih5ihitkvge5mvg3spw6mfpygiyr5tuqj5xls6wqbhrkjta3jqd.py
# Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
#   leaky_relu => gt, mul, where
# Graph fragment:
#   %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.01), kwargs = {})
#   %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 2048
    tmp0 = tl.load(in_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + (x2), tmp4, None)
    tl.store(out_ptr1 + (x2), tmp7, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (2048, 4), (4, 1))
    assert_size_stride(primals_2, (2048, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 2048), (2048, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 2048), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.bool)
        buf2 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1), torch.float32)
        # Topologically Sorted Source Nodes: [leaky_relu], Original ATen: [aten.leaky_relu]
        stream0 = get_raw_stream(0)
        triton_poi_fused_leaky_relu_0.run(buf0, primals_2, buf1, buf2,
            131072, grid=grid(131072), stream=stream0)
        del buf0
        del primals_2
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 2048),
            (2048, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1,
            2048), 0), alpha=1, beta=1, out=buf3)
        del primals_5
    return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1,
        reinterpret_tensor(buf2, (64, 2048), (2048, 1), 0), primals_4, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.onnx

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1,
        xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 2048
    tmp0 = tl.load(in_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.01
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(out_ptr0 + x2, tmp4, None)
    tl.store(out_ptr1 + x2, tmp7, None)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (2048, 4), (4, 1))
    assert_size_stride(primals_2, (2048,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 2048), (2048, 1))
    assert_size_stride(primals_5, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 2048), (2048, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 2048), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
            torch.bool)
        buf2 = empty_strided_cuda((4, 4, 4, 2048), (32768, 8192, 2048, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_leaky_relu_0[grid(131072)](buf0, primals_2, buf1,
            buf2, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
        del buf0
        del primals_2
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (64, 2048),
            (2048, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1,
            2048), 0), alpha=1, beta=1, out=buf3)
        del primals_5
    return (reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0),
        reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1,
        reinterpret_tensor(buf2, (64, 2048), (2048, 1), 0), primals_4)


class FeedForwardNew(nn.Module):

    def __init__(self, emb_dim, ff_dim=2048, dropout=0.1):
        super().__init__()
        self.linear_1 = nn.Linear(emb_dim, ff_dim)
        self.dropout = nn.Dropout(dropout)
        self.linear_2 = nn.Linear(ff_dim, emb_dim)

    def forward(self, input_0):
        primals_1 = self.linear_1.weight
        primals_2 = self.linear_1.bias
        primals_4 = self.linear_2.weight
        primals_5 = self.linear_2.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
repo_name: chandar-lab/CriticalGradientOptimization
module_name: FeedForward
synthetic: false
uuid: 6,419
licenses: [ "MIT" ]
stars: 1
sha: 1af4b1df40489991289bb50bb69859a00b2c97c6
repo_link: https://github.com/chandar-lab/CriticalGradientOptimization/tree/1af4b1df40489991289bb50bb69859a00b2c97c6
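Note: the kernel fuses the first linear layer's bias add with LeakyReLU(0.01) and also emits the boolean mask (buf1) that the backward pass needs; the traced forward contains no dropout op. A hedged smoke test (CUDA assumed, FeedForwardNew from this row):

import torch

ff = FeedForwardNew(emb_dim=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert ff(x).shape == (4, 4, 4, 4)  # 4 -> 2048 -> 4 projection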
entry_point: Theta
original_triton_python_code:

from torch.autograd import Function
import torch
import torch.nn as nn
from typing import Tuple
from typing import Optional
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
from typing import Any


class GradientReverseFunction(Function):

    @staticmethod
    def forward(ctx: 'Any', input: 'torch.Tensor', coeff:
            'Optional[float]'=1.0) -> torch.Tensor:
        ctx.coeff = coeff
        output = input * 1.0
        return output

    @staticmethod
    def backward(ctx: 'Any', grad_output: 'torch.Tensor') -> Tuple[torch.
            Tensor, Any]:
        return grad_output.neg() * ctx.coeff, None


class GradientReverseLayer(nn.Module):

    def __init__(self):
        super(GradientReverseLayer, self).__init__()

    def forward(self, *input):
        return GradientReverseFunction.apply(*input)


class Theta(nn.Module):
    """
    maximize loss with respect to :math:`\theta`,
    minimize loss with respect to features
    """

    def __init__(self, dim: 'int'):
        super(Theta, self).__init__()
        self.grl1 = GradientReverseLayer()
        self.grl2 = GradientReverseLayer()
        self.layer1 = nn.Linear(dim, dim)
        nn.init.eye_(self.layer1.weight)
        nn.init.zeros_(self.layer1.bias)

    def forward(self, features: 'torch.Tensor') -> torch.Tensor:
        features = self.grl1(features)
        return self.grl2(self.layer1(features))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4}]
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import torch.nn as nn
from typing import Tuple
from typing import Optional
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
from typing import Any

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 1.0
    tmp4 = tmp2 * tmp3
    tl.store(in_out_ptr0 + x2, tmp4, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
            num_warps=4, num_stages=1)
        del primals_1
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
        del primals_2
        buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf1
        triton_poi_fused_mul_1[grid(256)](buf2, primals_3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del primals_3
    return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)


class GradientReverseFunction(Function):

    @staticmethod
    def forward(ctx: 'Any', input: 'torch.Tensor', coeff:
            'Optional[float]'=1.0) -> torch.Tensor:
        ctx.coeff = coeff
        output = input * 1.0
        return output

    @staticmethod
    def backward(ctx: 'Any', grad_output: 'torch.Tensor') -> Tuple[torch.
            Tensor, Any]:
        return grad_output.neg() * ctx.coeff, None


class GradientReverseLayer(nn.Module):

    def __init__(self):
        super(GradientReverseLayer, self).__init__()

    def forward(self, *input):
        return GradientReverseFunction.apply(*input)


class ThetaNew(nn.Module):
    """
    maximize loss with respect to :math:`\theta`,
    minimize loss with respect to features
    """

    def __init__(self, dim: 'int'):
        super(ThetaNew, self).__init__()
        self.grl1 = GradientReverseLayer()
        self.grl2 = GradientReverseLayer()
        self.layer1 = nn.Linear(dim, dim)
        nn.init.eye_(self.layer1.weight)
        nn.init.zeros_(self.layer1.bias)

    def forward(self, input_0):
        primals_2 = self.layer1.weight
        primals_3 = self.layer1.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
repo_name: Liuhong99/CST
module_name: Theta
synthetic: false
uuid: 8,502
licenses: [ "MIT" ]
stars: 20
sha: f6653a4ee7968fa3ba875a182670636f648be783
repo_link: https://github.com/Liuhong99/CST/tree/f6653a4ee7968fa3ba875a182670636f648be783
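Note: both gradient-reversal layers are the identity in the forward pass (their * 1.0 multiplies survive as explicit kernels), and layer1 is initialised to an identity matrix with zero bias, so a freshly constructed module should act as the identity. A hedged check (CUDA assumed, ThetaNew from this row):

import torch

theta = ThetaNew(dim=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(theta(x), x)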
entry_point: Classify
original_triton_python_code:

import torch
import torch.nn as nn


def autopad(k, p=None):
    if p is None:
        p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
    return p


class Classify(nn.Module):

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
        super().__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)
        self.flat = nn.Flatten()

    def forward(self, x):
        z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else
            [x])], 1)
        return self.flat(self.conv(z))


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'c1': 4, 'c2': 4}]
optimised_triton_code:

import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
        tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 16.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
        tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x2, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
        buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=8,
            num_warps=2, num_stages=1)
        del primals_1
        buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
        buf3 = reinterpret_tensor(buf2, (4, 4, 1, 1), (4, 1, 16, 16), 0)
        del buf2
        triton_poi_fused_convolution_1[grid(16)](buf3, primals_3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del primals_3
    return reinterpret_tensor(buf3, (4, 4), (4, 1), 0), primals_2, buf1


def autopad(k, p=None):
    if p is None:
        p = k // 2 if isinstance(k, int) else [(x // 2) for x in k]
    return p


class ClassifyNew(nn.Module):

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
        super().__init__()
        self.aap = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g)
        self.flat = nn.Flatten()

    def forward(self, input_0):
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
HarryPham0123/FPT_data_centric_competition
Classify
false
5305
[ "Apache-2.0" ]
1
3fa1e0ac48fdae2649b639229d9a74f75e461878
https://github.com/HarryPham0123/FPT_data_centric_competition/tree/3fa1e0ac48fdae2649b639229d9a74f75e461878
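A record like this one can be spot-checked by running the original Classify module and the inductor-generated ClassifyNew side by side on the record's own get_inputs()/get_init_inputs() helpers. A minimal sketch, assuming both class definitions above have been executed and a CUDA device is present (the generated call() pins its buffers to cuda:0); the helper name check_record and the tolerance are ours, not part of the record:

import torch

def check_record(atol=1e-5):
    init_args, init_kwargs = get_init_inputs()
    ref = Classify(*init_args, **init_kwargs).cuda().eval()
    opt = ClassifyNew(*init_args, **init_kwargs).cuda().eval()
    opt.load_state_dict(ref.state_dict())   # compare with identical weights
    inputs = [t.cuda() for t in get_inputs()]
    with torch.no_grad():
        torch.testing.assert_close(ref(*inputs), opt(*inputs),
                                   atol=atol, rtol=atol)

check_record()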
MLP
import torch import torch.nn as nn class MLP(nn.Module): def __init__(self, input_dim, hidden_dim, num_layers, output_dim): super(MLP, self).__init__() self.hidden_dim = hidden_dim self.num_layers = num_layers self.fc1 = nn.Linear(input_dim, hidden_dim) self.act = nn.ReLU() self.fc2 = nn.Linear(hidden_dim, output_dim) def forward(self, x): hidden = self.fc1(x) relu = self.act(hidden) output = self.fc2(relu[:, -1, :]) return output def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'hidden_dim': 4, 'num_layers': 1, 'output_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex // 16 x3 = xindex % 16 x0 = xindex % 4 x4 = xindex tmp0 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + x4, tmp4, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_relu_threshold_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](buf0, primals_2, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0) del buf2 triton_poi_fused_add_1[grid(64)](buf3, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_2[grid(256)](buf0, primals_2, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf0 del primals_2 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (16, 4), (4, 1), 0), primals_4, buf4 class MLPNew(nn.Module): def __init__(self, input_dim, hidden_dim, num_layers, output_dim): super(MLPNew, self).__init__() 
self.hidden_dim = hidden_dim self.num_layers = num_layers self.fc1 = nn.Linear(input_dim, hidden_dim) self.act = nn.ReLU() self.fc2 = nn.Linear(hidden_dim, output_dim) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
NefeliTav/Stock-Prediction
MLP
false
2673
[ "Apache-2.0" ]
0
b422a246c762685ceb94c9714a2322fce71186e1
https://github.com/NefeliTav/Stock-Prediction/tree/b422a246c762685ceb94c9714a2322fce71186e1
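One detail worth noticing in this record: the [:, -1, :] slice from the original forward is folded into the load offset of triton_poi_fused_clone_0, which reads in_ptr0 + (48 + x3 + 64 * x2). For a contiguous (4, 4, 4, 4) activation with strides (64, 16, 4, 1), that offset addresses exactly the last slice along dim 1. A standalone illustration, not taken from the record (the index values are arbitrary, chosen only for the check):

import torch

x = torch.arange(256, dtype=torch.float32).reshape(4, 4, 4, 4)
flat = x.reshape(-1)
x2, x3 = 2, 7        # batch index and offset inside one 4x4 slice
# kernel-style flat load vs. the eager relu[:, -1, :]-style slice
assert flat[48 + x3 + 64 * x2] == x[:, -1].reshape(4, 16)[x2, x3]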
Beta2
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/up/cup6o5cbzbunxrgsjove7ghgc4dkxlfpf5zncfpxjlrgo5zjrjpe.py # Topologically Sorted Source Nodes: [mean, sub_2, exp, var, truediv_1, mul_1, sub_3, sub, truediv, pow_2, mul, alpha, beta], Original ATen: [aten.sigmoid, aten.rsub, aten.exp, aten.pow, aten.div, aten.mul, aten.sub] # Source node to ATen node mapping: # alpha => sub_1 # beta => sub_4 # exp => exp # mean => sigmoid # mul => mul # mul_1 => mul_1 # pow_2 => pow_2 # sub => sub # sub_2 => sub_2 # sub_3 => sub_3 # truediv => div # truediv_1 => div_1 # var => pow_1 # Graph fragment: # %sigmoid : [num_users=5] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg1_1,), kwargs = {}) # %pow_1 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%exp, 2), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %pow_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, %sigmoid), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, 1), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %pow_1), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sigmoid, 2), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %pow_2), kwargs = {}) # %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sigmoid), kwargs = {}) # %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub_3, %sub_1), kwargs = {}) triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from 
torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp4 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5 * tmp5 tmp7 = tmp3 / tmp6 tmp8 = tmp1 * tmp1 tmp9 = tmp7 * tmp8 tmp10 = tmp9 - tmp1 tmp11 = tmp7 * tmp1 tmp12 = tmp11 - tmp2 tmp13 = tmp12 - tmp10 tl.store(out_ptr0 + (x2), tmp10, xmask) tl.store(out_ptr1 + (x2), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, sub_2, exp, var, truediv_1, mul_1, sub_3, sub, truediv, pow_2, mul, alpha, beta], Original ATen: [aten.sigmoid, aten.rsub, aten.exp, aten.pow, aten.div, aten.mul, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0.run(arg0_1, arg1_1, buf0, buf1, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf0, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math import numpy as np import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.sigmoid(tmp0) tmp2 = 1.0 tmp3 = tmp2 - tmp1 tmp5 = tl_math.exp(tmp4) tmp6 = tmp5 * tmp5 tmp7 = tmp3 / tmp6 tmp8 = tmp1 * tmp1 tmp9 = tmp7 * tmp8 tmp10 = tmp9 - tmp1 tmp11 = tmp7 * tmp1 tmp12 = tmp11 - tmp2 tmp13 = tmp12 - tmp10 tl.store(out_ptr0 + x2, tmp10, xmask) tl.store(out_ptr1 + x2, tmp13, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (1, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_exp_mul_pow_rsub_sigmoid_sub_0[grid(256)](arg0_1, arg1_1, buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, buf1 class BoundedBeta(torch.distributions.Beta): def log_prob(self, x): return super().log_prob((x + 1) / 2) class Beta2New(nn.Module): def __init__(self, action_dim, init_std=0.25, learn_std=False): super(Beta2New, self).__init__() assert init_std < 0.5, 'Beta distribution has a max std dev of 0.5' self.action_dim = action_dim self.logstd = nn.Parameter(torch.ones(1, action_dim) * np.log( init_std), requires_grad=learn_std) self.learn_std = learn_std def sample(self, x, deterministic): if deterministic is False: action = self.evaluate(x).sample() else: return self.evaluate(x).mean return 2 * action - 1 def evaluate(self, x): alpha, beta = self(x) return BoundedBeta(alpha, beta) def forward(self, input_0): arg1_1 = self.logstd arg0_1 = input_0 output = call([arg0_1, arg1_1]) return output[0], output[1]
RohanPankaj/apex
Beta2
false
998
[ "MIT" ]
0
74e96386bf9446d1179106d6d65ea0368c1b5b27
https://github.com/RohanPankaj/apex/tree/74e96386bf9446d1179106d6d65ea0368c1b5b27
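The fused kernel above is the moment-matching Beta parameterisation evaluated elementwise: a sigmoid mean, a variance of exp(logstd)**2, and the standard alpha/beta formulas. An eager restatement under our own naming (logstd of shape (1, action_dim) broadcasts against x the same way the x0 = xindex % 4 load does in the kernel):

import torch

def alpha_beta_reference(x, logstd):
    mean = torch.sigmoid(x)
    var = torch.exp(logstd) ** 2
    alpha = (1 - mean) / var * mean ** 2 - mean    # kernel tmp10
    beta = ((1 - mean) / var * mean - 1) - alpha   # kernel tmp13
    return alpha, beta

# Should reproduce the two outputs of Beta2New.forward up to float rounding,
# e.g. with x = torch.randn(4, 4, 4, 4) and logstd of shape (1, 4).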
OptimizedBlock
import math import torch import torch.nn as nn import torch.nn.functional as F def _downsample(x): return F.avg_pool2d(x, 2) class OptimizedBlock(nn.Module): def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.relu, bn=False): super(OptimizedBlock, self).__init__() self.activation = activation self.bn = bn self.c1 = nn.Conv2d(in_channels, out_channels, ksize, 1, pad, bias= False) nn.init.xavier_uniform_(self.c1.weight, gain=math.sqrt(2.0)) self.c2 = nn.Conv2d(out_channels, out_channels, ksize, 1, pad, bias =False) nn.init.xavier_uniform_(self.c2.weight, gain=math.sqrt(2.0)) self.c_sc = nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False) nn.init.xavier_uniform_(self.c_sc.weight, gain=1.0) if self.bn: self.b1 = nn.BatchNorm2d(out_channels) nn.init.constant_(self.b1.weight, 1.0) self.b2 = nn.BatchNorm2d(out_channels) nn.init.constant_(self.b2.weight, 1.0) self.b_sc = nn.BatchNorm2d(out_channels) nn.init.constant_(self.b_sc.weight, 1.0) def residual(self, x): h = x h = self.b1(self.c1(h)) if self.bn else self.c1(h) h = self.activation(h) h = self.b2(self.c2(h)) if self.bn else self.c2(h) h = _downsample(h) return h def shortcut(self, x): return self.b_sc(self.c_sc(_downsample(x))) if self.bn else self.c_sc( _downsample(x)) def forward(self, x): return self.residual(x) + self.shortcut(x) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'in_channels': 4, 'out_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import math import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_relu_0(in_out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tl.store(in_out_ptr0 + x0, tmp2, xmask) @triton.jit def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_avg_pool2d_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 2 x1 = xindex // 2 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_out_ptr0 + x2, xmask) tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp7 = 0.25 tmp8 = tmp6 * tmp7 tmp10 = tmp8 + tmp9 tl.store(in_out_ptr0 + x2, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_relu_0[grid(256)](buf1, 256, XBLOCK=128, num_warps =4, num_stages=1) buf2 = extern_kernels.convolution(buf1, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) triton_poi_fused_avg_pool2d_1[grid(64)](primals_1, buf3, 64, XBLOCK =64, num_warps=1, num_stages=1) buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), 
dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 2, 2), (16, 4, 2, 1)) buf5 = buf4 del buf4 triton_poi_fused_add_avg_pool2d_2[grid(64)](buf5, buf2, 64, XBLOCK= 64, num_warps=1, num_stages=1) return buf5, primals_1, primals_2, primals_3, primals_4, buf1, buf2, buf3 def _downsample(x): return F.avg_pool2d(x, 2) class OptimizedBlockNew(nn.Module): def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.relu, bn=False): super(OptimizedBlockNew, self).__init__() self.activation = activation self.bn = bn self.c1 = nn.Conv2d(in_channels, out_channels, ksize, 1, pad, bias= False) nn.init.xavier_uniform_(self.c1.weight, gain=math.sqrt(2.0)) self.c2 = nn.Conv2d(out_channels, out_channels, ksize, 1, pad, bias =False) nn.init.xavier_uniform_(self.c2.weight, gain=math.sqrt(2.0)) self.c_sc = nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False) nn.init.xavier_uniform_(self.c_sc.weight, gain=1.0) if self.bn: self.b1 = nn.BatchNorm2d(out_channels) nn.init.constant_(self.b1.weight, 1.0) self.b2 = nn.BatchNorm2d(out_channels) nn.init.constant_(self.b2.weight, 1.0) self.b_sc = nn.BatchNorm2d(out_channels) nn.init.constant_(self.b_sc.weight, 1.0) def residual(self, x): h = x h = self.b1(self.c1(h)) if self.bn else self.c1(h) h = self.activation(h) h = self.b2(self.c2(h)) if self.bn else self.c2(h) h = _downsample(h) return h def shortcut(self, x): return self.b_sc(self.c_sc(_downsample(x))) if self.bn else self.c_sc( _downsample(x)) def forward(self, input_0): primals_2 = self.c1.weight primals_3 = self.c2.weight primals_4 = self.c_sc.weight primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
appa-ayephyu/RobGAN
OptimizedBlock
false
1463
[ "MIT" ]
0
1d4577edb5b858e9d0c1e76a4c323de18201190c
https://github.com/appa-ayephyu/RobGAN/tree/1d4577edb5b858e9d0c1e76a4c323de18201190c
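In this record the convolutions stay on extern_kernels.convolution while the pointwise work is fused: one kernel for the ReLU, one for the shortcut's avg_pool2d, and one that folds the residual's avg_pool2d into the final add. The pooling is hard-coded as four taps at offsets (2*x0 + 8*x1), +1, +4, +5 weighted by 0.25, which is just F.avg_pool2d(x, 2) on a 4x4 map; a standalone check of that identity (not from the record):

import torch
import torch.nn.functional as F

x = torch.randn(4, 4, 4, 4)
manual = 0.25 * (x[..., ::2, ::2] + x[..., ::2, 1::2]
                 + x[..., 1::2, ::2] + x[..., 1::2, 1::2])
torch.testing.assert_close(manual, F.avg_pool2d(x, 2))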
GeneralizedMeanPooling
import torch from torch import nn class GeneralizedMeanPooling(nn.Module): """Applies a 2D power-average adaptive pooling over an input signal composed of several input planes. The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)` - At p = infinity, one gets Max Pooling - At p = 1, one gets Average Pooling The output is of size H x W, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size of the image of the form H x W. Can be a tuple (H, W) or a single H for a square image H x H H and W can be either a ``int``, or ``None`` which means the size will be the same as that of the input. """ def __init__(self, norm, output_size=1, eps=1e-06): super(GeneralizedMeanPooling, self).__init__() assert norm > 0 self.p = float(norm) self.output_size = output_size self.eps = eps def forward(self, x): x = x.clamp(min=self.eps).pow(self.p) return torch.nn.functional.adaptive_avg_pool2d(x, self.output_size ).pow(1.0 / self.p) def __repr__(self): return self.__class__.__name__ + '(' + str(self.p ) + ', ' + 'output_size=' + str(self.output_size) + ')' def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'norm': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_clamp_mean_pow_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = 1e-06 tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp3 = tmp2 * tmp2 tmp4 = tmp3 * tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.sum(tmp7, 1)[:, None] tmp9 = 16.0 tmp10 = tmp8 / tmp9 tmp11 = 0.25 tmp12 = libdevice.pow(tmp10, tmp11) tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp12, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32) buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0) del buf0 get_raw_stream(0) triton_per_fused_clamp_mean_pow_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class GeneralizedMeanPoolingNew(nn.Module): """Applies a 2D power-average adaptive pooling over an input signal composed of several input planes. The function computed is: :math:`f(X) = pow(sum(pow(X, p)), 1/p)` - At p = infinity, one gets Max Pooling - At p = 1, one gets Average Pooling The output is of size H x W, for any input size. The number of output features is equal to the number of input planes. Args: output_size: the target output size of the image of the form H x W. Can be a tuple (H, W) or a single H for a square image H x H H and W can be either a ``int``, or ``None`` which means the size will be the same as that of the input. """ def __init__(self, norm, output_size=1, eps=1e-06): super(GeneralizedMeanPoolingNew, self).__init__() assert norm > 0 self.p = float(norm) self.output_size = output_size self.eps = eps def __repr__(self): return self.__class__.__name__ + '(' + str(self.p ) + ', ' + 'output_size=' + str(self.output_size) + ')' def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
gmt710/fast-reid
GeneralizedMeanPooling
false
12441
[ "Apache-2.0" ]
0
44a609280013eb6928f67c418c7212d67e40fb5d
https://github.com/gmt710/fast-reid/tree/44a609280013eb6928f67c418c7212d67e40fb5d
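For norm=4 the whole module collapses into one persistent reduction: clamp to eps, fourth power (computed as (x*x)*(x*x) in the kernel), mean over the 16 spatial positions, then the 0.25 root. An eager restatement under our own helper name; the final assert only sanity-checks the adaptive-pool/mean identity:

import torch
import torch.nn.functional as F

def gem_reference(x, p=4.0, eps=1e-06):
    return F.adaptive_avg_pool2d(x.clamp(min=eps).pow(p), 1).pow(1.0 / p)

x = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(
    gem_reference(x),
    x.clamp(min=1e-06).pow(4).mean(dim=(2, 3), keepdim=True).pow(0.25))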
PairwiseBCELoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/yq/cyqlwjkng5qjbweegxbaulhoijwmfrlulsfh74jigzz46wxrt44f.py # Topologically Sorted Source Nodes: [bce_loss, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mean] # Source node to ATen node mapping: # bce_loss => abs_1, exp, full_default, log1p, minimum, mul, neg, sub_1, sub_2, sub_3 # loss => mean # Graph fragment: # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {}) # %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg0_1), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {}) # %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {}) # %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {}) # %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {}) # %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_3,), kwargs = {}) triton_per_fused_binary_cross_entropy_with_logits_mean_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_mean_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], 
reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp3 = tl.load(in_ptr1 + (r0), None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp17, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [bce_loss, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mean] stream0 = get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_mean_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn from abc import abstractmethod import torch.utils.data.dataloader import torch.nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_binary_cross_entropy_with_logits_mean_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp16 = 256.0 tmp17 = tmp15 / tmp16 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp17, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_binary_cross_entropy_with_logits_mean_0[grid(1)](buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, class SimilarityLoss(nn.Module): def __init__(self): super(SimilarityLoss, self).__init__() @abstractmethod def forward(self, inputs, targets): pass class PairwiseBCELossNew(SimilarityLoss): """ Binary cross entropy between pair similarities and pair labels. """ def __init__(self, balanced=False): super(PairwiseBCELossNew, self).__init__() self.balanced = balanced def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
GT-SALT/LADA
PairwiseBCELoss
false
8138
[ "MIT" ]
31
2838a4c90694bf1054c6bab7f3b60ab5e04a5d4d
https://github.com/GT-SALT/LADA/tree/2838a4c90694bf1054c6bab7f3b60ab5e04a5d4d
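The single fused reduction evaluates (1 - y) * z - (min(0, z) - log1p(exp(-|z|))) and divides the sum by 256. Since z - min(0, z) == max(z, 0), that is the usual numerically stable BCE-with-logits. A standalone check of the identity (not from the record):

import torch
import torch.nn.functional as F

z = torch.randn(4, 4, 4, 4)   # pair similarity logits
y = torch.rand(4, 4, 4, 4)    # pair labels in [0, 1]
manual = ((1 - y) * z
          - (torch.clamp_max(z, 0) - torch.log1p(torch.exp(-z.abs())))).mean()
torch.testing.assert_close(manual, F.binary_cross_entropy_with_logits(z, y))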
ConvLayer
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/gk/cgkf6erwdwyiy4xdm222c6rkwywjqetjz7vncozpnvl25mi55bq2.py # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # conv2d => convolution # relu => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024, 4096], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 
'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1024 xnumel = 3136 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = (yindex // 256) tmp0 = tl.load(in_out_ptr0 + (x2 + (3136*y3)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (y0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x2 + (3136*y3)), tmp4, xmask) tl.store(out_ptr0 + (y0 + (256*x2) + (802816*y1)), tmp6, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (256, 1, 9, 9), (81, 81, 9, 1)) assert_size_stride(primals_2, (256, ), (1, )) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 56, 56), (802816, 3136, 56, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 256, 56, 56), (802816, 1, 14336, 256), torch.bool) # Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf2, 1024, 3136, grid=grid(1024, 3136), stream=stream0) del primals_2 return (buf1, primals_1, primals_3, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((256, 1, 9, 9), (81, 81, 9, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. constexpr): xnumel = 3136 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 256 y1 = yindex // 256 tmp0 = tl.load(in_out_ptr0 + (x2 + 3136 * y3), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + y0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1, 1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + (x2 + 3136 * y3), tmp4, xmask) tl.store(out_ptr0 + (y0 + 256 * x2 + 802816 * y1), tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (256, 1, 9, 9), (81, 81, 9, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 256, 56, 56), (802816, 3136, 56, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 256, 56, 56), (802816, 1, 14336, 256), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(1024, 3136) ](buf1, primals_2, buf2, 1024, 3136, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_2 return buf1, primals_1, primals_3, buf2 class ConvLayerNew(nn.Module): """A Convolutional Layer""" def __init__(self, in_channels=1, out_channels=256, kernel_size=9, stride=1 ): super(ConvLayerNew, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
VIVelev/capsnets
ConvLayer
false
1176
[ "MIT" ]
0
dca4bfcd4007977a6bc3534a4676880326fcf94a
https://github.com/VIVelev/capsnets/tree/dca4bfcd4007977a6bc3534a4676880326fcf94a
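The buffer shapes asserted in call() come straight from valid-convolution arithmetic: a 9x9 kernel with stride 1 and no padding over a 64x64 map gives (64 - 9) + 1 = 56 per side, and the second (bool) buffer is the ReLU mask (output <= 0) kept in channels-last strides for the threshold_backward pass. A quick eager check of the shape math (not from the record):

import torch
import torch.nn as nn

conv = nn.Conv2d(1, 256, kernel_size=9, stride=1)
out = torch.relu(conv(torch.randn(4, 1, 64, 64)))
assert out.shape == (4, 256, 56, 56)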
PARALoss
import torch import torch.nn as nn import torch.nn.functional as F class PARALoss(nn.Module): """ Softmax classifier for sentence-level relation extraction. """ def __init__(self): """ Args: sentence_encoder: encoder for sentences num_class: number of classes id2rel: dictionary of id -> relation name mapping """ super().__init__() def forward(self, score, predicate_one_hot_labels): entity_mask = predicate_one_hot_labels.sum(dim=1, keepdim=True ).repeat_interleave(score.shape[1], dim=1) entity_mask = (entity_mask > 0).float() entity_sum = (entity_mask != 0).sum(dim=(2, 3)).float() loss = ((F.binary_cross_entropy(score, predicate_one_hot_labels, reduction='none') * entity_mask).sum(dim=(2, 3)) / entity_sum ).mean() if loss.item() < 0: None return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused__to_copy_binary_cross_entropy_gt_mul_ne_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r2 = rindex x3 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + (r2 + 16 * x3), xmask, other=0.0) tmp3 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0) tmp13 = tl.load(in_ptr0 + (r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tl.load(in_ptr0 + (16 + r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tl.load(in_ptr0 + (32 + r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp18 = tl.load(in_ptr0 + (48 + r2 + 64 * x1), xmask, eviction_policy= 'evict_last', other=0.0) tmp1 = 1.0 tmp2 = tmp0 - tmp1 tmp4 = -tmp3 tmp5 = libdevice.log1p(tmp4) tmp6 = -100.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp2 * tmp7 tmp9 = tl_math.log(tmp3) tmp10 = triton_helpers.maximum(tmp9, tmp6) tmp11 = tmp0 * tmp10 tmp12 = tmp8 - tmp11 tmp15 = tmp13 + tmp14 tmp17 = tmp15 + tmp16 tmp19 = tmp17 + tmp18 tmp20 = 0.0 tmp21 = tmp19 > tmp20 tmp22 = tmp21.to(tl.float32) tmp23 = tmp12 * tmp22 tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK]) tmp26 = tl.where(xmask, tmp24, 0) tmp27 = tl.sum(tmp26, 1)[:, None] tmp28 = tmp22 != tmp20 tmp29 = tmp28.to(tl.int64) tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK]) tmp32 = tl.where(xmask, tmp30, 0) tmp33 = tl.sum(tmp32, 1)[:, None] tl.store(out_ptr0 + x3, tmp27, xmask) tl.store(out_ptr1 + x3, tmp33, xmask) @triton.jit def triton_per_fused__to_copy_div_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tmp1.to(tl.float32) tmp3 = tmp0 / tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.sum(tmp4, 1)[:, None] tmp7 = 16.0 tmp8 = tmp6 / tmp7 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.int64) get_raw_stream(0) triton_per_fused__to_copy_binary_cross_entropy_gt_mul_ne_sum_0[grid(16) ](arg0_1, arg1_1, buf0, buf1, 16, 16, XBLOCK=8, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf2 = empty_strided_cuda((), (), torch.float32) buf3 = buf2 del buf2 triton_per_fused__to_copy_div_mean_1[grid(1)](buf3, buf0, buf1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 return buf3, 
class PARALossNew(nn.Module): """ Softmax classifier for sentence-level relation extraction. """ def __init__(self): """ Args: sentence_encoder: encoder for sentences num_class: number of classes id2rel: dictionary of id -> relation name mapping """ super().__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
igorvlnascimento/redn
PARALoss
false
15598
[ "MIT" ]
100
f40f19a0fdfbb11a7987996d520716a05bafd77b
https://github.com/igorvlnascimento/redn/tree/f40f19a0fdfbb11a7987996d520716a05bafd77b
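The per-element term the first kernel builds, (y - 1) * log1p(-p) - y * log(p) with both logs clamped at -100, matches PyTorch's binary_cross_entropy(p, y, reduction='none'), which applies the same clamp. A standalone check of that identity (not from the record):

import torch
import torch.nn.functional as F

p = torch.rand(4, 4, 4, 4)   # score, already in (0, 1)
y = torch.rand(4, 4, 4, 4)   # predicate_one_hot_labels
manual = ((y - 1) * torch.clamp(torch.log1p(-p), min=-100.0)
          - y * torch.clamp(torch.log(p), min=-100.0))
torch.testing.assert_close(manual,
                           F.binary_cross_entropy(p, y, reduction='none'))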
Net2
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/xq/cxqxvwhuevcb5oe7gsgfej3rmce7mdc6vqg3mkviccgugis2c7ro.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (27*y1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: 
runs/run_shard_1/inductor_cache/az/cazvf33aclbntgyixs3zlm6bdzs672xtry2xl6pc3l3sjzwrnks5.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4096], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 12 xnumel = 4096 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 3 y1 = (yindex // 3) tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (3*x2) + (12288*y1)), tmp0, ymask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/xs/cxsarnw2wm2gid2judloczqftyialh3etpmvbejw7tuglcm5m2ir.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 8192 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 64 y1 = (yindex // 64) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (64*x2) + (576*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/eg/cegkhpirbdizrfwpxf7z5iyj5fnizpk2ywd44ximmmde2qo6slhl.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32768, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 32768 xnumel = 9 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 128 y1 = (yindex // 128) tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (128*x2) + (1152*y1)), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/7b/c7bbwyvole23x2v7i53wmwj7rky54jyyg54fpdfn5ds2jdfkoyaq.py # Unsorted Source Nodes: [], Original ATen: [] # Source node to ATen node mapping: triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', ''' import triton import 
# kernel path: runs/run_shard_1/inductor_cache/7b/c7bbwyvole23x2v7i53wmwj7rky54jyyg54fpdfn5ds2jdfkoyaq.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072, 16], tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 131072
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = (yindex // 256)
    tmp0 = tl.load(in_ptr0 + (x2 + (9*y3)), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + (256*x2) + (2304*y1)), tmp0, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/ar/carn2mg5lupz3lxons37n746n4rfmsv23xjvostd3syds3oglwp2.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
#   conv2d => convolution
#   x => expm1, gt, mul, mul_2, where
# Graph fragment:
#   %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
#   %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 1.0), kwargs = {})
#   %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
#   %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
#   %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
triton_poi_fused_convolution_elu_5 = async_compile.triton('triton_poi_fused_convolution_elu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[1048576],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 1048576
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + (x2), tmp2, None)
    tl.store(out_ptr0 + (x2), tmp9, None)
''', device_str='cuda')
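# [Editor's sketch, not Inductor output] The fused kernel above adds the conv
# bias and applies ELU(alpha=1.0) in one pass. Assuming float32 inputs, an
# eager reference for what `where(gt, mul, expm1 * 1.0)` computes would be:
#
#     import torch
#     def conv_bias_elu_reference(conv_out, bias):
#         y = conv_out + bias.view(1, -1, 1, 1)
#         return y, torch.where(y > 0, y, torch.expm1(y))  # ELU, alpha=1.0
#
# Like the sketch, the kernel keeps both results: the biased pre-activation is
# written back into in_out_ptr0 and the activated value into out_ptr0.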
# kernel path: runs/run_shard_1/inductor_cache/zv/czvqge5ahupojnvz2v7h44ldtkuavcrkodam2sukxqiix3phrxgp.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
#   x_1 => getitem, getitem_1
# Graph fragment:
#   %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
#   %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_6 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 64
    x1 = (xindex // 64) % 32
    x2 = (xindex // 2048)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (128*x1) + (8192*x2)), None)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + (128*x1) + (8192*x2)), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + (128*x1) + (8192*x2)), None)
    tmp5 = tl.load(in_ptr0 + (4160 + x0 + (128*x1) + (8192*x2)), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x3), tmp6, None)
    tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')
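# [Editor's note, not Inductor output] This reads as a 2x2, stride-2 max pool
# over a channels-last activation: the four loads are the NW/NE/SW/SE taps of
# each window (+64 steps one column at 64 channels, +4096 = 64*64 steps one
# row). out_ptr0 receives the window maximum and out_ptr1 an int8 code in
# {0, 1, 2, 3} recording which tap won, i.e. the index half of
# max_pool2d_with_indices.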
# kernel path: runs/run_shard_1/inductor_cache/b2/cb2sfzroyoxchqx3cr3avag76mlmvkcxotnwxafekmxywxhzidux.py
# Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
#   conv2d_1 => convolution_1
#   x_2 => expm1_1, gt_1, mul_3, mul_5, where_1
# Graph fragment:
#   %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
#   %mul_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 1.0), kwargs = {})
#   %expm1_1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_3,), kwargs = {})
#   %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_1, 1.0), kwargs = {})
#   %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %mul_3, %mul_5), kwargs = {})
triton_poi_fused_convolution_elu_7 = async_compile.triton('triton_poi_fused_convolution_elu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[524288],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 524288
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + (x2), tmp2, None)
    tl.store(out_ptr0 + (x2), tmp9, None)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/5e/c5e3ctm73sqrl6erzojlu6nmx4xtjv6f6ujhufzvnjv5pwrl5p7r.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
#   x_3 => getitem_2, getitem_3
# Graph fragment:
#   %getitem_2 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 0), kwargs = {})
#   %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_8 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = (xindex // 128) % 16
    x2 = (xindex // 2048)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (256*x1) + (8192*x2)), None)
    tmp1 = tl.load(in_ptr0 + (128 + x0 + (256*x1) + (8192*x2)), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + (256*x1) + (8192*x2)), None)
    tmp5 = tl.load(in_ptr0 + (4224 + x0 + (256*x1) + (8192*x2)), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x3), tmp6, None)
    tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/sz/cszm6vdzb6mb4b4ml3xizu45viqrla5l2yh5635oxdyjsdowmwr7.py
# Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
#   conv2d_2 => convolution_2
#   x_4 => expm1_2, gt_2, mul_6, mul_8, where_2
# Graph fragment:
#   %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_2, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
#   %mul_6 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 1.0), kwargs = {})
#   %expm1_2 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_6,), kwargs = {})
#   %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_2, 1.0), kwargs = {})
#   %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %mul_6, %mul_8), kwargs = {})
triton_poi_fused_convolution_elu_9 = async_compile.triton('triton_poi_fused_convolution_elu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[262144],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 262144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + (x2), tmp2, None)
    tl.store(out_ptr0 + (x2), tmp9, None)
''', device_str='cuda')
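# [Editor's note, not Inductor output] triton_poi_fused_convolution_elu_5/7/9/11
# share one template: only xnumel and the channel modulus (64, 128, 256, 512)
# change as the feature maps halve spatially and double in depth. Mutating
# in_out_ptr0 in place apparently keeps the biased conv output alive (it is
# part of call()'s return tuple) without allocating an extra buffer.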
# kernel path: runs/run_shard_1/inductor_cache/7f/c7fwneqy7a24hb73o3mp4pzr6b2pel5hiw6qfzsrpjjfjqigw5w7.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
#   x_5 => getitem_4, getitem_5
# Graph fragment:
#   %getitem_4 : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 0), kwargs = {})
#   %getitem_5 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_2, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_10 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[65536],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 65536
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 256
    x1 = (xindex // 256) % 8
    x2 = (xindex // 2048)
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + (512*x1) + (8192*x2)), None)
    tmp1 = tl.load(in_ptr0 + (256 + x0 + (512*x1) + (8192*x2)), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + (512*x1) + (8192*x2)), None)
    tmp5 = tl.load(in_ptr0 + (4352 + x0 + (512*x1) + (8192*x2)), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + (x3), tmp6, None)
    tl.store(out_ptr1 + (x3), tmp16, None)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/xn/cxnxyghtee3hfl6a74e2ey4rqjygwrln5zypgyxf5fseymwrpfj5.py
# Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten.elu]
# Source node to ATen node mapping:
#   conv2d_3 => convolution_3
#   x_6 => expm1_3, gt_3, mul_11, mul_9, where_3
# Graph fragment:
#   %convolution_3 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem_4, %primals_8, %primals_9, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
#   %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
#   %mul_9 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 1.0), kwargs = {})
#   %expm1_3 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_9,), kwargs = {})
#   %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_3, 1.0), kwargs = {})
#   %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %mul_9, %mul_11), kwargs = {})
triton_poi_fused_convolution_elu_11 = async_compile.triton('triton_poi_fused_convolution_elu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[131072],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_elu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_elu_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 131072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + (x2), None)
    tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + (x2), tmp2, None)
    tl.store(out_ptr0 + (x2), tmp9, None)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/iq/ciqm5cll7gby6f2dxlejez3op7wo3nmsdaikqy6w7ahykcwusapd.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
#   x_7 => _low_memory_max_pool2d_with_offsets_3, getitem_7
# Graph fragment:
#   %_low_memory_max_pool2d_with_offsets_3 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%where_3, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
#   %getitem_7 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_3, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_12 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64, 512], tile_hint=TileHint.DEFAULT,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_12(in_ptr0, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 64
    xnumel = 512
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y0 = yindex % 4
    y1 = (yindex // 4)
    y5 = yindex
    y4 = (yindex // 16)
    y6 = yindex % 16
    tmp0 = tl.load(in_ptr0 + (x2 + (1024*y0) + (8192*y1)), xmask & ymask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (512 + x2 + (1024*y0) + (8192*y1)), xmask & ymask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (4096 + x2 + (1024*y0) + (8192*y1)), xmask & ymask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr0 + (4608 + x2 + (1024*y0) + (8192*y1)), xmask & ymask, eviction_policy='evict_last')
    tmp2 = tmp1 > tmp0
    tmp3 = tl.full([1, 1], 1, tl.int8)
    tmp4 = tl.full([1, 1], 0, tl.int8)
    tmp5 = tl.where(tmp2, tmp3, tmp4)
    tmp6 = triton_helpers.maximum(tmp1, tmp0)
    tmp8 = tmp7 > tmp6
    tmp9 = tl.full([1, 1], 2, tl.int8)
    tmp10 = tl.where(tmp8, tmp9, tmp5)
    tmp11 = triton_helpers.maximum(tmp7, tmp6)
    tmp13 = tmp12 > tmp11
    tmp14 = tl.full([1, 1], 3, tl.int8)
    tmp15 = tl.where(tmp13, tmp14, tmp10)
    tmp16 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + (x2 + (512*y5)), tmp15, xmask & ymask)
    tl.store(out_ptr1 + (y6 + (16*x2) + (8192*y4)), tmp16, xmask & ymask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/kb/ckblopawveaa4fcohzc5c56jfd2kkpiv3mymvxmp7uxqm7lgbede.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.elu]
# Source node to ATen node mapping:
#   x_9 => expm1_4, gt_4, mul_12, mul_14, where_4
# Graph fragment:
#   %gt_4 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm, 0), kwargs = {})
#   %mul_12 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm, 1.0), kwargs = {})
#   %expm1_4 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_12,), kwargs = {})
#   %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_4, 1.0), kwargs = {})
#   %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %mul_12, %mul_14), kwargs = {})
triton_poi_fused_elu_13 = async_compile.triton('triton_poi_fused_elu_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[2048],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_13(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 2048
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), None)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tl.store(out_ptr0 + (x0), tmp7, None)
''', device_str='cuda')
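# [Editor's note, not Inductor output] triton_poi_fused_elu_13 above and
# _14/_15/_16 below are the same element-wise ELU applied to the four
# fully-connected outputs; only xnumel differs (2048, 4096, 8192, 16384 =
# 16 rows x 128/256/512/1024 features).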
# kernel path: runs/run_shard_1/inductor_cache/eu/ceuca5bl73qkpewrgv7wwewoaxw56rsqjdi7uetbctm4a76wn4pt.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.elu]
# Source node to ATen node mapping:
#   x_10 => expm1_5, gt_5, mul_15, mul_17, where_5
# Graph fragment:
#   %gt_5 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm_1, 0), kwargs = {})
#   %mul_15 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_1, 1.0), kwargs = {})
#   %expm1_5 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_15,), kwargs = {})
#   %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_5, 1.0), kwargs = {})
#   %where_5 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_5, %mul_15, %mul_17), kwargs = {})
triton_poi_fused_elu_14 = async_compile.triton('triton_poi_fused_elu_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4096],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_14(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4096
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), None)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tl.store(out_ptr0 + (x0), tmp7, None)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/b3/cb3k7rcbdtiij34c2ssloi7lqph2yffioahqqabe2xpr3c6ombhk.py
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.elu]
# Source node to ATen node mapping:
#   x_11 => expm1_6, gt_6, mul_18, mul_20, where_6
# Graph fragment:
#   %gt_6 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm_2, 0), kwargs = {})
#   %mul_18 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_2, 1.0), kwargs = {})
#   %expm1_6 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_18,), kwargs = {})
#   %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_6, 1.0), kwargs = {})
#   %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %mul_18, %mul_20), kwargs = {})
triton_poi_fused_elu_15 = async_compile.triton('triton_poi_fused_elu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[8192],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_15(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 8192
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), None)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tl.store(out_ptr0 + (x0), tmp7, None)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/f4/cf4hc3mjswsaoaaluwkaxixhchimowwuxv2mpymox445rjndcelt.py
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.elu]
# Source node to ATen node mapping:
#   x_12 => expm1_7, gt_7, mul_21, mul_23, where_7
# Graph fragment:
#   %gt_7 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%addmm_3, 0), kwargs = {})
#   %mul_21 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_3, 1.0), kwargs = {})
#   %expm1_7 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul_21,), kwargs = {})
#   %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1_7, 1.0), kwargs = {})
#   %where_7 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_7, %mul_21, %mul_23), kwargs = {})
triton_poi_fused_elu_16 = async_compile.triton('triton_poi_fused_elu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[16384],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_16(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 16384
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = tl.full([XBLOCK], True, tl.int1)
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (x0), None)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tl.store(out_ptr0 + (x0), tmp7, None)
''', device_str='cuda')

async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19 = args
    args.clear()
    assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1))
    assert_size_stride(primals_2, (64, ), (1, ))
    assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
    assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (128, ), (1, ))
    assert_size_stride(primals_6, (256, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_7, (256, ), (1, ))
    assert_size_stride(primals_8, (512, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_9, (512, ), (1, ))
    assert_size_stride(primals_10, (128, 2048), (2048, 1))
    assert_size_stride(primals_11, (128, ), (1, ))
    assert_size_stride(primals_12, (256, 128), (128, 1))
    assert_size_stride(primals_13, (256, ), (1, ))
    assert_size_stride(primals_14, (512, 256), (256, 1))
    assert_size_stride(primals_15, (512, ), (1, ))
    assert_size_stride(primals_16, (1024, 512), (512, 1))
    assert_size_stride(primals_17, (1024, ), (1, ))
    assert_size_stride(primals_18, (10, 1024), (1024, 1))
    assert_size_stride(primals_19, (10, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        stream0 = get_raw_stream(0)
        triton_poi_fused_0.run(primals_1, buf0, 192, 9, grid=grid(192, 9), stream=stream0)
        del primals_1
        buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_1.run(primals_3, buf1, 12, 4096, grid=grid(12, 4096), stream=stream0)
        del primals_3
        buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_2.run(primals_4, buf2, 8192, 9, grid=grid(8192, 9), stream=stream0)
        del primals_4
        buf3 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_3.run(primals_6, buf3, 32768, 9, grid=grid(32768, 9), stream=stream0)
        del primals_6
        buf4 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32)
        # Unsorted Source Nodes: [], Original ATen: []
        triton_poi_fused_4.run(primals_8, buf4, 131072, 9, grid=grid(131072, 9), stream=stream0)
        del primals_8
        # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
        buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf5, (4, 64, 64, 64), (262144, 1, 4096, 64))
        buf6 = buf5; del buf5  # reuse
        buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32)
        # Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.elu]
        triton_poi_fused_convolution_elu_5.run(buf6, primals_2, buf7, 1048576, grid=grid(1048576), stream=stream0)
        del primals_2
        buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32)
        buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8)
        # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_6.run(buf7, buf8, buf9, 262144, grid=grid(262144), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
        buf10 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf10, (4, 128, 32, 32), (131072, 1, 4096, 128))
        buf11 = buf10; del buf10  # reuse
        buf12 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32)
        # Topologically Sorted Source Nodes: [conv2d_1, x_2], Original ATen: [aten.convolution, aten.elu]
        triton_poi_fused_convolution_elu_7.run(buf11, primals_5, buf12, 524288, grid=grid(524288), stream=stream0)
        del primals_5
        buf13 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32)
        buf14 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8)
        # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_8.run(buf12, buf13, buf14, 131072, grid=grid(131072), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
        buf15 = extern_kernels.convolution(buf13, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf15, (4, 256, 16, 16), (65536, 1, 4096, 256))
        buf16 = buf15; del buf15  # reuse
        buf17 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32)
        # Topologically Sorted Source Nodes: [conv2d_2, x_4], Original ATen: [aten.convolution, aten.elu]
        triton_poi_fused_convolution_elu_9.run(buf16, primals_7, buf17, 262144, grid=grid(262144), stream=stream0)
        del primals_7
        buf18 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32)
        buf19 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8)
        # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_10.run(buf17, buf18, buf19, 65536, grid=grid(65536), stream=stream0)
        # Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
        buf20 = extern_kernels.convolution(buf18, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf20, (4, 512, 8, 8), (32768, 1, 4096, 512))
        buf21 = buf20; del buf20  # reuse
        buf22 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32)
        # Topologically Sorted Source Nodes: [conv2d_3, x_6], Original ATen: [aten.convolution, aten.elu]
        triton_poi_fused_convolution_elu_11.run(buf21, primals_9, buf22, 131072, grid=grid(131072), stream=stream0)
        del primals_9
        buf23 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512), torch.int8)
        buf24 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.max_pool2d_with_indices]
        triton_poi_fused_max_pool2d_with_indices_12.run(buf22, buf23, buf24, 64, 512, grid=grid(64, 512), stream=stream0)
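        # [Editor's note, not Inductor output] From here the (4, 512, 4, 4)
        # pooled activation is viewed as a (16, 2048) matrix via
        # reinterpret_tensor (no copy; roughly x.view(16, 2048) in eager
        # terms), so the classifier head runs as plain addmm
        # (bias + mat1 @ mat2) calls, each followed by one of the
        # elu_13..16 kernels.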
        buf25 = empty_strided_cuda((16, 128), (128, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_11, reinterpret_tensor(buf24, (16, 2048), (2048, 1), 0), reinterpret_tensor(primals_10, (2048, 128), (1, 2048), 0), alpha=1, beta=1, out=buf25)
        del primals_11
        buf26 = empty_strided_cuda((16, 128), (128, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.elu]
        triton_poi_fused_elu_13.run(buf25, buf26, 2048, grid=grid(2048), stream=stream0)
        buf27 = empty_strided_cuda((16, 256), (256, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_13, buf26, reinterpret_tensor(primals_12, (128, 256), (1, 128), 0), alpha=1, beta=1, out=buf27)
        del primals_13
        buf28 = empty_strided_cuda((16, 256), (256, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.elu]
        triton_poi_fused_elu_14.run(buf27, buf28, 4096, grid=grid(4096), stream=stream0)
        buf29 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_15, buf28, reinterpret_tensor(primals_14, (256, 512), (1, 256), 0), alpha=1, beta=1, out=buf29)
        del primals_15
        buf30 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.elu]
        triton_poi_fused_elu_15.run(buf29, buf30, 8192, grid=grid(8192), stream=stream0)
        buf31 = empty_strided_cuda((16, 1024), (1024, 1), torch.float32)
        # Topologically Sorted Source Nodes: [linear_3], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_17, buf30, reinterpret_tensor(primals_16, (512, 1024), (1, 512), 0), alpha=1, beta=1, out=buf31)
        del primals_17
        buf32 = empty_strided_cuda((16, 1024), (1024, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.elu]
        triton_poi_fused_elu_16.run(buf31, buf32, 16384, grid=grid(16384), stream=stream0)
        buf33 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
        # Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.addmm]
        extern_kernels.addmm(primals_19, buf32, reinterpret_tensor(primals_18, (1024, 10), (1, 1024), 0), alpha=1, beta=1, out=buf33)
        del primals_19
    return (buf33, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf9, buf11, buf12, buf13, buf14, buf16, buf17, buf18, buf19, buf21, buf22, buf23, reinterpret_tensor(buf24, (16, 2048), (2048, 1), 0), buf25, buf26, buf27, buf28, buf29, buf30, buf31, buf32, primals_18, primals_16, primals_14, primals_12, primals_10, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((64, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_7 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_8 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
    primals_9 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_10 = rand_strided((128, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
    primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_12 = rand_strided((256, 128), (128, 1), device='cuda:0', dtype=torch.float32)
    primals_13 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_14 = rand_strided((512, 256), (256, 1), device='cuda:0', dtype=torch.float32)
    primals_15 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_16 = rand_strided((1024, 512), (512, 1), device='cuda:0', dtype=torch.float32)
    primals_17 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_18 = rand_strided((10, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
    primals_19 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
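# [Editor's note] The module below appears to be the same compiled graph
# emitted without the @triton_heuristics decorators and meta dictionaries;
# the kernel bodies match the strings passed to async_compile.triton above.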
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 192
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 27 * y1), tmp0, xmask & ymask)


@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    ynumel = 12
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    x2 = xindex
    y3 = yindex
    y0 = yindex % 3
    y1 = yindex // 3
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 3 * x2 + 12288 * y1), tmp0, ymask)


@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 64
    y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 64 * x2 + 576 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 128
    y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 128 * x2 + 1152 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
    xnumel = 9
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    tl.full([XBLOCK, YBLOCK], True, tl.int1)
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = yindex % 256
    y1 = yindex // 256
    tmp0 = tl.load(in_ptr0 + (x2 + 9 * y3), xmask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 256 * x2 + 2304 * y1), tmp0, xmask)


@triton.jit
def triton_poi_fused_convolution_elu_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 64
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + x2, tmp2, None)
    tl.store(out_ptr0 + x2, tmp9, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_6(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 64
    x1 = xindex // 64 % 32
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 128 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (64 + x0 + 128 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 128 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4160 + x0 + 128 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_elu_7(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 128
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + x2, tmp2, None)
    tl.store(out_ptr0 + x2, tmp9, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 128
    x1 = xindex // 128 % 16
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (128 + x0 + 256 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 256 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4224 + x0 + 256 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_elu_9(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 256
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + x2, tmp2, None)
    tl.store(out_ptr0 + x2, tmp9, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_10(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 256
    x1 = xindex // 256 % 8
    x2 = xindex // 2048
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 512 * x1 + 8192 * x2), None)
    tmp1 = tl.load(in_ptr0 + (256 + x0 + 512 * x1 + 8192 * x2), None)
    tmp3 = tl.load(in_ptr0 + (4096 + x0 + 512 * x1 + 8192 * x2), None)
    tmp5 = tl.load(in_ptr0 + (4352 + x0 + 512 * x1 + 8192 * x2), None)
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tmp7 = tmp1 > tmp0
    tmp8 = tl.full([1], 1, tl.int8)
    tmp9 = tl.full([1], 0, tl.int8)
    tmp10 = tl.where(tmp7, tmp8, tmp9)
    tmp11 = tmp3 > tmp2
    tmp12 = tl.full([1], 2, tl.int8)
    tmp13 = tl.where(tmp11, tmp12, tmp10)
    tmp14 = tmp5 > tmp4
    tmp15 = tl.full([1], 3, tl.int8)
    tmp16 = tl.where(tmp14, tmp15, tmp13)
    tl.store(out_ptr0 + x3, tmp6, None)
    tl.store(out_ptr1 + x3, tmp16, None)


@triton.jit
def triton_poi_fused_convolution_elu_11(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x2 = xindex
    x0 = xindex % 512
    tmp0 = tl.load(in_out_ptr0 + x2, None)
    tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 1.0
    tmp6 = tmp2 * tmp5
    tmp7 = libdevice.expm1(tmp6)
    tmp8 = tmp7 * tmp5
    tmp9 = tl.where(tmp4, tmp6, tmp8)
    tl.store(in_out_ptr0 + x2, tmp2, None)
    tl.store(out_ptr0 + x2, tmp9, None)
tl.where(tmp2, tmp3, tmp4) tmp6 = triton_helpers.maximum(tmp1, tmp0) tmp8 = tmp7 > tmp6 tmp9 = tl.full([1, 1], 2, tl.int8) tmp10 = tl.where(tmp8, tmp9, tmp5) tmp11 = triton_helpers.maximum(tmp7, tmp6) tmp13 = tmp12 > tmp11 tmp14 = tl.full([1, 1], 3, tl.int8) tmp15 = tl.where(tmp13, tmp14, tmp10) tmp16 = triton_helpers.maximum(tmp12, tmp11) tl.store(out_ptr0 + (x2 + 512 * y5), tmp15, xmask & ymask) tl.store(out_ptr1 + (y6 + 16 * x2 + 8192 * y4), tmp16, xmask & ymask) @triton.jit def triton_poi_fused_elu_13(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_elu_14(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_elu_15(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) @triton.jit def triton_poi_fused_elu_16(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_ptr0 + x0, None) tmp1 = 0.0 tmp2 = tmp0 > tmp1 tmp3 = 1.0 tmp4 = tmp0 * tmp3 tmp5 = libdevice.expm1(tmp4) tmp6 = tmp5 * tmp3 tmp7 = tl.where(tmp2, tmp4, tmp6) tl.store(out_ptr0 + x0, tmp7, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19) = args args.clear() assert_size_stride(primals_1, (64, 3, 3, 3), (27, 9, 3, 1)) assert_size_stride(primals_2, (64,), (1,)) assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1)) assert_size_stride(primals_4, (128, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (128,), (1,)) assert_size_stride(primals_6, (256, 128, 3, 3), (1152, 9, 3, 1)) assert_size_stride(primals_7, (256,), (1,)) assert_size_stride(primals_8, (512, 256, 3, 3), (2304, 9, 3, 1)) assert_size_stride(primals_9, (512,), (1,)) assert_size_stride(primals_10, (128, 2048), (2048, 1)) assert_size_stride(primals_11, (128,), (1,)) assert_size_stride(primals_12, (256, 128), (128, 1)) assert_size_stride(primals_13, (256,), (1,)) assert_size_stride(primals_14, (512, 256), (256, 1)) assert_size_stride(primals_15, (512,), (1,)) assert_size_stride(primals_16, (1024, 512), (512, 1)) assert_size_stride(primals_17, (1024,), (1,)) assert_size_stride(primals_18, (10, 1024), (1024, 1)) assert_size_stride(primals_19, (10,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 3, 3, 3), (27, 1, 9, 3), torch.float32) 
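        # The permute-copy kernels launched below repack the weights and the
        # input into channels-last strides (stride 1 on the channel
        # dimension) before they are fed to the extern convolution calls;
        # buf0 holds conv1's 64x3x3x3 weight in that layout.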
get_raw_stream(0) triton_poi_fused_0[grid(192, 9)](primals_1, buf0, 192, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 3, 64, 64), (12288, 1, 192, 3), torch .float32) triton_poi_fused_1[grid(12, 4096)](primals_3, buf1, 12, 4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1) del primals_3 buf2 = empty_strided_cuda((128, 64, 3, 3), (576, 1, 192, 64), torch .float32) triton_poi_fused_2[grid(8192, 9)](primals_4, buf2, 8192, 9, XBLOCK= 16, YBLOCK=64, num_warps=4, num_stages=1) del primals_4 buf3 = empty_strided_cuda((256, 128, 3, 3), (1152, 1, 384, 128), torch.float32) triton_poi_fused_3[grid(32768, 9)](primals_6, buf3, 32768, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_6 buf4 = empty_strided_cuda((512, 256, 3, 3), (2304, 1, 768, 256), torch.float32) triton_poi_fused_4[grid(131072, 9)](primals_8, buf4, 131072, 9, XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1) del primals_8 buf5 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 64, 64, 64), (262144, 1, 4096, 64)) buf6 = buf5 del buf5 buf7 = empty_strided_cuda((4, 64, 64, 64), (262144, 1, 4096, 64), torch.float32) triton_poi_fused_convolution_elu_5[grid(1048576)](buf6, primals_2, buf7, 1048576, XBLOCK=512, num_warps=8, num_stages=1) del primals_2 buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.float32) buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1, 2048, 64), torch.int8) triton_poi_fused_max_pool2d_with_indices_6[grid(262144)](buf7, buf8, buf9, 262144, XBLOCK=512, num_warps=8, num_stages=1) buf10 = extern_kernels.convolution(buf8, buf2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf10, (4, 128, 32, 32), (131072, 1, 4096, 128)) buf11 = buf10 del buf10 buf12 = empty_strided_cuda((4, 128, 32, 32), (131072, 1, 4096, 128), torch.float32) triton_poi_fused_convolution_elu_7[grid(524288)](buf11, primals_5, buf12, 524288, XBLOCK=512, num_warps=8, num_stages=1) del primals_5 buf13 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.float32) buf14 = empty_strided_cuda((4, 128, 16, 16), (32768, 1, 2048, 128), torch.int8) triton_poi_fused_max_pool2d_with_indices_8[grid(131072)](buf12, buf13, buf14, 131072, XBLOCK=512, num_warps=8, num_stages=1) buf15 = extern_kernels.convolution(buf13, buf3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf15, (4, 256, 16, 16), (65536, 1, 4096, 256)) buf16 = buf15 del buf15 buf17 = empty_strided_cuda((4, 256, 16, 16), (65536, 1, 4096, 256), torch.float32) triton_poi_fused_convolution_elu_9[grid(262144)](buf16, primals_7, buf17, 262144, XBLOCK=512, num_warps=8, num_stages=1) del primals_7 buf18 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.float32) buf19 = empty_strided_cuda((4, 256, 8, 8), (16384, 1, 2048, 256), torch.int8) triton_poi_fused_max_pool2d_with_indices_10[grid(65536)](buf17, buf18, buf19, 65536, XBLOCK=512, num_warps=4, num_stages=1) buf20 = extern_kernels.convolution(buf18, buf4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf20, (4, 512, 8, 8), (32768, 1, 4096, 512)) buf21 = buf20 del buf20 buf22 = empty_strided_cuda((4, 512, 8, 8), (32768, 1, 4096, 512), torch.float32) 
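        # Same epilogue fusion as the earlier stages: one elementwise kernel
        # adds conv4's bias and applies ELU, writing the pre-activation into
        # buf21 (kept for backward) and the ELU output into buf22.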
        triton_poi_fused_convolution_elu_11[grid(131072)](buf21, primals_9,
            buf22, 131072, XBLOCK=512, num_warps=8, num_stages=1)
        del primals_9
        buf23 = empty_strided_cuda((4, 512, 4, 4), (8192, 1, 2048, 512),
            torch.int8)
        buf24 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1),
            torch.float32)
        triton_poi_fused_max_pool2d_with_indices_12[grid(64, 512)](buf22,
            buf23, buf24, 64, 512, XBLOCK=4, YBLOCK=64, num_warps=4,
            num_stages=1)
        buf25 = empty_strided_cuda((16, 128), (128, 1), torch.float32)
        extern_kernels.addmm(primals_11, reinterpret_tensor(buf24, (16,
            2048), (2048, 1), 0), reinterpret_tensor(primals_10, (2048,
            128), (1, 2048), 0), alpha=1, beta=1, out=buf25)
        del primals_11
        buf26 = empty_strided_cuda((16, 128), (128, 1), torch.float32)
        triton_poi_fused_elu_13[grid(2048)](buf25, buf26, 2048, XBLOCK=128,
            num_warps=4, num_stages=1)
        buf27 = empty_strided_cuda((16, 256), (256, 1), torch.float32)
        extern_kernels.addmm(primals_13, buf26, reinterpret_tensor(
            primals_12, (128, 256), (1, 128), 0), alpha=1, beta=1, out=buf27)
        del primals_13
        buf28 = empty_strided_cuda((16, 256), (256, 1), torch.float32)
        triton_poi_fused_elu_14[grid(4096)](buf27, buf28, 4096, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf29 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
        extern_kernels.addmm(primals_15, buf28, reinterpret_tensor(
            primals_14, (256, 512), (1, 256), 0), alpha=1, beta=1, out=buf29)
        del primals_15
        buf30 = empty_strided_cuda((16, 512), (512, 1), torch.float32)
        triton_poi_fused_elu_15[grid(8192)](buf29, buf30, 8192, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf31 = empty_strided_cuda((16, 1024), (1024, 1), torch.float32)
        extern_kernels.addmm(primals_17, buf30, reinterpret_tensor(
            primals_16, (512, 1024), (1, 512), 0), alpha=1, beta=1, out=buf31)
        del primals_17
        buf32 = empty_strided_cuda((16, 1024), (1024, 1), torch.float32)
        triton_poi_fused_elu_16[grid(16384)](buf31, buf32, 16384,
            XBLOCK=256, num_warps=4, num_stages=1)
        buf33 = empty_strided_cuda((16, 10), (10, 1), torch.float32)
        extern_kernels.addmm(primals_19, buf32, reinterpret_tensor(
            primals_18, (1024, 10), (1, 1024), 0), alpha=1, beta=1, out=buf33)
        del primals_19
    return (buf33, buf0, buf1, buf2, buf3, buf4, buf6, buf7, buf8, buf9,
        buf11, buf12, buf13, buf14, buf16, buf17, buf18, buf19, buf21,
        buf22, buf23, reinterpret_tensor(buf24, (16, 2048), (2048, 1), 0),
        buf25, buf26, buf27, buf28, buf29, buf30, buf31, buf32, primals_18,
        primals_16, primals_14, primals_12, primals_10)


class Net2New(nn.Module):

    def __init__(self):
        super(Net2New, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv2 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv3 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv4 = nn.Conv2d(256, 512, 3, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.pool3 = nn.MaxPool2d(2, 2)
        self.pool4 = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(512 * 2 * 2, 128)
        self.fc2 = nn.Linear(128, 256)
        self.fc3 = nn.Linear(256, 512)
        self.fc4 = nn.Linear(512, 1024)
        self.fc5 = nn.Linear(1024, 10)

    def linear_layer_ids(self):
        return [12, 14, 16]

    def linear_layer_parameters(self):
        # Flatten and concatenate the parameters of every fully connected
        # layer into a single 1-D vector. (Chaining the generators with `or`
        # would short-circuit to fc1's parameters only.)
        fc_params = (list(self.fc1.parameters()) +
            list(self.fc2.parameters()) + list(self.fc3.parameters()) +
            list(self.fc4.parameters()) + list(self.fc5.parameters()))
        linear1 = torch.cat([x.view(-1) for x in fc_params])
        return linear1

    def train_order_block_ids(self):
        return [[14, 15], [4, 5], [2, 3], [8, 9], [16, 17], [12, 13], [6,
            7], [0, 1], [10, 11]]

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_8 = self.conv4.weight
        primals_9 = self.conv4.bias
        primals_10 = self.fc1.weight
        primals_11 = self.fc1.bias
        primals_12 = self.fc2.weight
        primals_13 = self.fc2.bias
        primals_14 = self.fc3.weight
        primals_15 = self.fc3.bias
        primals_16 = self.fc4.weight
        primals_17 = self.fc4.bias
        primals_18 = self.fc5.weight
        primals_19 = self.fc5.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13, primals_14,
            primals_15, primals_16, primals_17, primals_18, primals_19])
        return output[0]
SarodYatawatta/federated-pytorch-test
Net2
false
8,763
[ "Apache-2.0" ]
33
42a51ba12a92b32fa19273340d5b61e74e11d8e0
https://github.com/SarodYatawatta/federated-pytorch-test/tree/42a51ba12a92b32fa19273340d5b61e74e11d8e0
FocalLossBinary
import torch import torch.jit import torch.nn.functional as F import torch.nn.functional from functools import partial from torch.nn.modules.loss import _Loss def reduced_focal_loss(outputs: 'torch.Tensor', targets: 'torch.Tensor', threshold: 'float'=0.5, gamma: 'float'=2.0, reduction='mean'): """ Compute reduced focal loss between target and output logits. Source https://github.com/BloodAxe/pytorch-toolbelt See :class:`~pytorch_toolbelt.losses` for details. Args: outputs: Tensor of arbitrary shape targets: Tensor of the same shape as input reduction (string, optional): Specifies the reduction to apply to the output: "none" | "mean" | "sum" | "batchwise_mean". "none": no reduction will be applied, "mean": the sum of the output will be divided by the number of elements in the output, "sum": the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. "batchwise_mean" computes mean loss per sample in batch. Default: "mean" See https://arxiv.org/abs/1903.01347 """ targets = targets.type(outputs.type()) logpt = -F.binary_cross_entropy_with_logits(outputs, targets, reduction ='none') pt = torch.exp(logpt) focal_reduction = ((1.0 - pt) / threshold).pow(gamma) focal_reduction[pt < threshold] = 1 loss = -focal_reduction * logpt if reduction == 'mean': loss = loss.mean() if reduction == 'sum': loss = loss.sum() if reduction == 'batchwise_mean': loss = loss.sum(0) return loss def sigmoid_focal_loss(outputs: 'torch.Tensor', targets: 'torch.Tensor', gamma: 'float'=2.0, alpha: 'float'=0.25, reduction: 'str'='mean'): """ Compute binary focal loss between target and output logits. Source https://github.com/BloodAxe/pytorch-toolbelt See :class:`~pytorch_toolbelt.losses` for details. Args: outputs: Tensor of arbitrary shape targets: Tensor of the same shape as input reduction (string, optional): Specifies the reduction to apply to the output: "none" | "mean" | "sum" | "batchwise_mean". "none": no reduction will be applied, "mean": the sum of the output will be divided by the number of elements in the output, "sum": the output will be summed. See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/loss/losses.py # noqa: E501 """ targets = targets.type(outputs.type()) logpt = -F.binary_cross_entropy_with_logits(outputs, targets, reduction ='none') pt = torch.exp(logpt) loss = -(1 - pt).pow(gamma) * logpt if alpha is not None: loss = loss * (alpha * targets + (1 - alpha) * (1 - targets)) if reduction == 'mean': loss = loss.mean() if reduction == 'sum': loss = loss.sum() if reduction == 'batchwise_mean': loss = loss.sum(0) return loss class FocalLossBinary(_Loss): def __init__(self, ignore: 'int'=None, reduced: 'bool'=False, gamma: 'float'=2.0, alpha: 'float'=0.25, threshold: 'float'=0.5, reduction: 'str'='mean'): """ Compute focal loss for binary classification problem. """ super().__init__() self.ignore = ignore if reduced: self.loss_fn = partial(reduced_focal_loss, gamma=gamma, threshold=threshold, reduction=reduction) else: self.loss_fn = partial(sigmoid_focal_loss, gamma=gamma, alpha= alpha, reduction=reduction) def forward(self, logits, targets): """ Args: logits: [bs; ...] targets: [bs; ...] 
""" targets = targets.view(-1) logits = logits.view(-1) if self.ignore is not None: not_ignored = targets != self.ignore logits = logits[not_ignored] targets = targets[not_ignored] loss = self.loss_fn(logits, targets) return loss def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.jit import torch.nn.functional as F import torch.nn.functional from functools import partial from torch.nn.modules.loss import _Loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0( in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = -tmp12 tmp14 = tl_math.exp(tmp13) tmp15 = tmp1 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = -tmp16 tmp18 = tmp17 * tmp13 tmp19 = 0.25 tmp20 = tmp0 * tmp19 tmp21 = 0.75 tmp22 = tmp2 * tmp21 tmp23 = tmp20 + tmp22 tmp24 = tmp18 * tmp23 tmp25 = tl.broadcast_to(tmp24, [RBLOCK]) tmp27 = triton_helpers.promote_to_tensor(tl.sum(tmp25, 0)) tmp28 = 256.0 tmp29 = tmp27 / tmp28 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp29, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_binary_cross_entropy_with_logits_exp_mean_mul_neg_pow_rsub_0[ grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 return buf1, def reduced_focal_loss(outputs: 'torch.Tensor', targets: 'torch.Tensor', threshold: 'float'=0.5, gamma: 'float'=2.0, reduction='mean'): """ Compute reduced focal loss between target and output logits. Source https://github.com/BloodAxe/pytorch-toolbelt See :class:`~pytorch_toolbelt.losses` for details. Args: outputs: Tensor of arbitrary shape targets: Tensor of the same shape as input reduction (string, optional): Specifies the reduction to apply to the output: "none" | "mean" | "sum" | "batchwise_mean". "none": no reduction will be applied, "mean": the sum of the output will be divided by the number of elements in the output, "sum": the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. "batchwise_mean" computes mean loss per sample in batch. 
Default: "mean" See https://arxiv.org/abs/1903.01347 """ targets = targets.type(outputs.type()) logpt = -F.binary_cross_entropy_with_logits(outputs, targets, reduction ='none') pt = torch.exp(logpt) focal_reduction = ((1.0 - pt) / threshold).pow(gamma) focal_reduction[pt < threshold] = 1 loss = -focal_reduction * logpt if reduction == 'mean': loss = loss.mean() if reduction == 'sum': loss = loss.sum() if reduction == 'batchwise_mean': loss = loss.sum(0) return loss def sigmoid_focal_loss(outputs: 'torch.Tensor', targets: 'torch.Tensor', gamma: 'float'=2.0, alpha: 'float'=0.25, reduction: 'str'='mean'): """ Compute binary focal loss between target and output logits. Source https://github.com/BloodAxe/pytorch-toolbelt See :class:`~pytorch_toolbelt.losses` for details. Args: outputs: Tensor of arbitrary shape targets: Tensor of the same shape as input reduction (string, optional): Specifies the reduction to apply to the output: "none" | "mean" | "sum" | "batchwise_mean". "none": no reduction will be applied, "mean": the sum of the output will be divided by the number of elements in the output, "sum": the output will be summed. See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/loss/losses.py # noqa: E501 """ targets = targets.type(outputs.type()) logpt = -F.binary_cross_entropy_with_logits(outputs, targets, reduction ='none') pt = torch.exp(logpt) loss = -(1 - pt).pow(gamma) * logpt if alpha is not None: loss = loss * (alpha * targets + (1 - alpha) * (1 - targets)) if reduction == 'mean': loss = loss.mean() if reduction == 'sum': loss = loss.sum() if reduction == 'batchwise_mean': loss = loss.sum(0) return loss class FocalLossBinaryNew(_Loss): def __init__(self, ignore: 'int'=None, reduced: 'bool'=False, gamma: 'float'=2.0, alpha: 'float'=0.25, threshold: 'float'=0.5, reduction: 'str'='mean'): """ Compute focal loss for binary classification problem. """ super().__init__() self.ignore = ignore if reduced: self.loss_fn = partial(reduced_focal_loss, gamma=gamma, threshold=threshold, reduction=reduction) else: self.loss_fn = partial(sigmoid_focal_loss, gamma=gamma, alpha= alpha, reduction=reduction) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
CamilaGL/nnUNet
FocalLossBinary
false
185
[ "Apache-2.0" ]
0
471ab73a6e4f67fc72d476183b5344be4cccf7ca
https://github.com/CamilaGL/nnUNet/tree/471ab73a6e4f67fc72d476183b5344be4cccf7ca
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/cn/ccnj2zvsdjzgpa7neirpbn7na3ekiicqluna2zx5kbsudbicfbfc.py # Topologically Sorted Source Nodes: [mean, std, sub, add, normed, mul, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mean => mean # mul => mul # normed => div # std => sqrt, var # sub => sub # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {}) # %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1]), kwargs = {correction: 1.0, keepdim: True}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %add), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {}) triton_poi_fused_add_div_mean_mul_std_sub_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 
'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 4 x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x3), xmask) tmp2 = tl.load(in_ptr1 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 1e-06 tmp27 = tmp25 + tmp26 tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + (x3), tmp31, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_3, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mean, std, sub, add, normed, mul, add_1], Original ATen: [aten.mean, aten.std, aten.sub, aten.add, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0.run(primals_2, primals_1, primals_3, buf0, 256, grid=grid(256), stream=stream0) del primals_2 del primals_3 return (buf0, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 4 x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x3, xmask) tmp2 = tl.load(in_ptr1 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp5 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp30 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp8 = tmp6 + tmp7 tmp9 = 4.0 tmp10 = tmp8 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp2 - tmp10 tmp13 = tmp12 * tmp12 tmp14 = tmp3 - tmp10 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp10 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp7 - tmp10 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp23 = 3.0 tmp24 = tmp22 / tmp23 tmp25 = libdevice.sqrt(tmp24) tmp26 = 1e-06 tmp27 = tmp25 + tmp26 tmp28 = tmp11 / tmp27 tmp29 = tmp0 * tmp28 tmp31 = tmp29 + tmp30 tl.store(out_ptr0 + x3, tmp31, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 1, 1), (1, 1, 1)) assert_size_stride(primals_3, (4, 1, 1), (1, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2, primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 del primals_3 return buf0, primals_1 class LayerNormNew(nn.Module): def __init__(self, size, eps=1e-06): super(LayerNormNew, self).__init__() self.eps = eps self.weight = nn.Parameter(torch.ones(size, 1, 1)) self.bias = nn.Parameter(torch.zeros(size, 1, 1)) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
ParadoxZW/CosAttention2d
LayerNorm
false
5,708
[ "Apache-2.0" ]
1
19b3e655cf0ebc40721b806eb46a3132c488a188
https://github.com/ParadoxZW/CosAttention2d/tree/19b3e655cf0ebc40721b806eb46a3132c488a188
LinearEmbedding
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/xz/cxz7i3qbiizfbbzvas22bbwy5nxzvmtfdg5vhhiye56dk4hdonst.py # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul => mul # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 2.0), kwargs = {}) triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') 
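# Note: the constant 2.0 baked into the kernel above is the embedding scale
# factor folded in at compile time (presumably math.sqrt(d_model) with
# d_model=4 in this trace).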
async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_0.run(buf1, primals_2, 256, grid=grid(256), stream=stream0) del primals_2 return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_mul_0[grid(256)](buf1, primals_2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0) class LinearEmbeddingNew(nn.Module): def __init__(self, inp_size, d_model): super(LinearEmbeddingNew, self).__init__() self.lut = nn.Linear(inp_size, d_model) self.d_model = d_model def forward(self, input_0): primals_1 = self.lut.weight primals_2 = self.lut.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
swift88-clone/Trajectory-Transformer
LinearEmbedding
false
13,011
[ "MIT" ]
0
62983b645ec88d8972bc2c2af1b7b4a299d3feb0
https://github.com/swift88-clone/Trajectory-Transformer/tree/62983b645ec88d8972bc2c2af1b7b4a299d3feb0
C
import torch
import torch.nn as nn


class C(nn.Module):

    def __init__(self, input_channel, output_channel, kernel_size, stride,
        padding, activation=None):
        """
        At the final layer, a 3x3 convolution is used to map each
        64-component feature vector to the desired number of classes.

        :param input_channel: input channel size
        :param output_channel: output channel size
        """
        super(C, self).__init__()
        # nn.Sequential takes modules as positional arguments (or an
        # OrderedDict), not a list.
        if activation == 'sigmoid':
            self.layer = nn.Sequential(nn.Conv2d(input_channel,
                output_channel, kernel_size=kernel_size, stride=stride,
                padding=padding), nn.Sigmoid())
        elif activation == 'tanh':
            self.layer = nn.Sequential(nn.Conv2d(input_channel,
                output_channel, kernel_size=kernel_size, stride=stride,
                padding=padding), nn.Tanh())
        else:
            self.layer = nn.Conv2d(input_channel, output_channel,
                kernel_size=kernel_size, stride=stride, padding=padding)

    def forward(self, x):
        return self.layer(x)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_channel': 4, 'output_channel': 4, 'kernel_size': 4,
        'stride': 1, 'padding': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride


@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 1296
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 81 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(4, 4), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_0[grid(1296)](buf1, primals_2, 1296,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf1, primals_1, primals_3


class CNew(nn.Module):

    def __init__(self, input_channel, output_channel, kernel_size, stride,
        padding, activation=None):
        """
        At the final layer, a 3x3 convolution is used to map each
        64-component feature vector to the desired number of classes.

        :param input_channel: input channel size
        :param output_channel: output channel size
        """
        super(CNew, self).__init__()
        # nn.Sequential takes modules as positional arguments (or an
        # OrderedDict), not a list.
        if activation == 'sigmoid':
            self.layer = nn.Sequential(nn.Conv2d(input_channel,
                output_channel, kernel_size=kernel_size, stride=stride,
                padding=padding), nn.Sigmoid())
        elif activation == 'tanh':
            self.layer = nn.Sequential(nn.Conv2d(input_channel,
                output_channel, kernel_size=kernel_size, stride=stride,
                padding=padding), nn.Tanh())
        else:
            self.layer = nn.Conv2d(input_channel, output_channel,
                kernel_size=kernel_size, stride=stride, padding=padding)

    def forward(self, input_0):
        # Note: this compiled path assumes activation=None, where self.layer
        # is a bare Conv2d exposing .weight and .bias directly.
        primals_1 = self.layer.weight
        primals_2 = self.layer.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Nikronic/Deep-Halftoning
C
false
893
[ "MIT" ]
0
9564c592abf139ccab2791c1dbb354505edab5f9
https://github.com/Nikronic/Deep-Halftoning/tree/9564c592abf139ccab2791c1dbb354505edab5f9
C1
import torch import torch.nn as nn from collections import OrderedDict class C1(nn.Module): def __init__(self): super(C1, self).__init__() self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(1, 6, kernel_size=(5, 5))), ('relu1', nn.ReLU()), ('s1', nn.MaxPool2d (kernel_size=(2, 2), stride=2))])) def forward(self, img): output = self.c1(img) return output def get_inputs(): return [torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from collections import OrderedDict assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 86400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 3600 % 6 x0 = xindex % 3600 x4 = xindex // 3600 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(out_ptr0 + (x0 + 3616 * x4), tmp4, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 21600 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 30 x1 = xindex // 30 % 30 x2 = xindex // 900 x5 = xindex x4 = xindex // 5400 x6 = xindex % 5400 tmp0 = tl.load(in_ptr0 + (2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (60 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (61 + 2 * x0 + 120 * x1 + 3616 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = tmp1 > tmp0 tmp8 = tl.full([1], 1, tl.int8) tmp9 = tl.full([1], 0, tl.int8) tmp10 = tl.where(tmp7, tmp8, tmp9) tmp11 = tmp3 > tmp2 tmp12 = tl.full([1], 2, tl.int8) tmp13 = tl.where(tmp11, tmp12, tmp10) tmp14 = tmp5 > tmp4 tmp15 = tl.full([1], 3, tl.int8) tmp16 = tl.where(tmp14, tmp15, tmp13) tl.store(out_ptr0 + x5, tmp6, xmask) tl.store(out_ptr1 + (x6 + 5504 * x4), tmp16, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (6, 1, 5, 5), (25, 25, 5, 1)) assert_size_stride(primals_2, (6,), (1,)) assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 6, 60, 60), (21600, 3600, 60, 1)) buf1 = empty_strided_cuda((4, 6, 60, 60), (21696, 3616, 60, 1), torch.float32) get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(86400)](buf0, primals_2, buf1, 86400, XBLOCK=1024, num_warps=4, num_stages=1) del buf0 del primals_2 buf2 = empty_strided_cuda((4, 6, 30, 30), (5400, 900, 30, 1), torch .float32) buf3 = empty_strided_cuda((4, 6, 30, 30), (5504, 900, 30, 1), torch .int8) triton_poi_fused_max_pool2d_with_indices_1[grid(21600)](buf1, buf2, buf3, 21600, XBLOCK=256, num_warps=4, num_stages=1) return buf2, primals_1, primals_3, buf1, buf3 class C1New(nn.Module): def __init__(self): super(C1New, self).__init__() self.c1 = nn.Sequential(OrderedDict([('c1', nn.Conv2d(1, 6, kernel_size=(5, 5))), ('relu1', nn.ReLU()), ('s1', 
nn.MaxPool2d (kernel_size=(2, 2), stride=2))])) def forward(self, input_0): primals_1 = self.c1.c1.weight primals_2 = self.c1.c1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
zjgbz/img_cls
C1
false
4,664
[ "MIT" ]
0
513d5ae423d95e008a82a6ffe443db49f8ed9ac2
https://github.com/zjgbz/img_cls/tree/513d5ae423d95e008a82a6ffe443db49f8ed9ac2
Highway
import torch from torch import nn import torch.nn.functional as F class NonCausalConv1d(nn.Module): """Non causal Conv1d with appropriate padding to ensure sequence length stays the same. Note Convolutions always have stride of 1 following layout in paper. """ def __init__(self, in_channels, out_channels, kernel_size, dilation): super().__init__() padding = (kernel_size - 1) * dilation // 2 self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, 1, padding, dilation) def forward(self, x): """ Inputs: x(batch_size x input_dim x seq_len) """ return self.conv(x) class CausalConv1d(nn.Module): """Causal conv1d with appropriate padding to ensure sequence length stays the same. Note Convolutions always have stride of 1 following layout in paper. """ def __init__(self, in_channels, out_channels, kernel_size, dilation): super().__init__() self.padding = (kernel_size - 1) * dilation self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, 1, self.padding, dilation) def forward(self, x): """ Inputs: x(batch_size x input_dim x seq_len) """ x = self.conv(x) if self.padding > 0: return x[:, :, :-self.padding].contiguous() else: return x class Highway(nn.Module): """Highway network with conv1d """ def __init__(self, hidden, kernel_size, dilation, causal=False): self.d = hidden super().__init__() if causal: self.conv = CausalConv1d(hidden, 2 * hidden, kernel_size, dilation) else: self.conv = NonCausalConv1d(hidden, 2 * hidden, kernel_size, dilation) def forward(self, x): """ Inputs: x(batch_size x input_dim x seq_len) """ Hout = self.conv(x) H1 = Hout[:, :self.d, :] H2 = Hout[:, self.d:, :] return F.sigmoid(H1) * H2 + (1 - F.sigmoid(H1)) * x def get_inputs(): return [torch.rand([4, 4, 2])] def get_init_inputs(): return [[], {'hidden': 4, 'kernel_size': 4, 'dilation': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) @triton.jit def triton_poi_fused_add_mul_rsub_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 2 % 4 x2 = xindex // 8 x3 = xindex tmp0 = tl.load(in_ptr0 + (x1 + 8 * x2), xmask, eviction_policy='evict_last' ) tmp2 = tl.load(in_ptr0 + (4 + x1 + 8 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr1 + x3, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = 1.0 tmp5 = tmp4 - tmp1 tmp7 = tmp5 * tmp6 tmp8 = tmp3 + tmp7 tl.store(out_ptr0 + x3, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 4, 2), (8, 2, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(1,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 8, 1), (8, 1, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(32)](buf1, primals_2, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused_add_mul_rsub_sigmoid_1[grid(32)](buf1, primals_3, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) return buf2, primals_1, primals_3, buf1 class NonCausalConv1d(nn.Module): """Non causal Conv1d with appropriate padding to ensure sequence length stays the same. Note Convolutions always have stride of 1 following layout in paper. """ def __init__(self, in_channels, out_channels, kernel_size, dilation): super().__init__() padding = (kernel_size - 1) * dilation // 2 self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, 1, padding, dilation) def forward(self, x): """ Inputs: x(batch_size x input_dim x seq_len) """ return self.conv(x) class CausalConv1d(nn.Module): """Causal conv1d with appropriate padding to ensure sequence length stays the same. Note Convolutions always have stride of 1 following layout in paper. 
""" def __init__(self, in_channels, out_channels, kernel_size, dilation): super().__init__() self.padding = (kernel_size - 1) * dilation self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, 1, self.padding, dilation) def forward(self, x): """ Inputs: x(batch_size x input_dim x seq_len) """ x = self.conv(x) if self.padding > 0: return x[:, :, :-self.padding].contiguous() else: return x class HighwayNew(nn.Module): """Highway network with conv1d """ def __init__(self, hidden, kernel_size, dilation, causal=False): self.d = hidden super().__init__() if causal: self.conv = CausalConv1d(hidden, 2 * hidden, kernel_size, dilation) else: self.conv = NonCausalConv1d(hidden, 2 * hidden, kernel_size, dilation) def forward(self, input_0): primals_1 = self.conv.conv.weight primals_2 = self.conv.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
lstsm12345/DCTTS-PyTorch
Highway
false
3,944
[ "MIT" ]
0
d44b9407b654abc2069bd2a7ef6231572ace1fa7
https://github.com/lstsm12345/DCTTS-PyTorch/tree/d44b9407b654abc2069bd2a7ef6231572ace1fa7
MuLawDecoding
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/xs/cxspvk4ioiwtqm3hkzwl34vomr2i6gck6kjiao23ufljwmu7xw3g.py # Topologically Sorted Source Nodes: [mu, truediv, mul, x, sign, abs_1, log1p, mul_1, exp, sub_1, mul_2, x_1], Original ATen: [aten.lift_fresh, aten.div, aten.mul, aten.sub, aten.sign, aten.abs, aten.log1p, aten.exp] # Source node to ATen node mapping: # abs_1 => abs_1 # exp => exp # log1p => full_default_1 # mu => full_default # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # sign => sign # sub_1 => sub_1 # truediv => div # x => sub # x_1 => div_1 # Graph fragment: # %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([], 255.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %full_default), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 2), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1.0), kwargs = {}) # %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%sub,), kwargs = {}) # %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%sub,), kwargs = {}) # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 5.545177459716797), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%abs_1, %full_default_1), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%exp, 1.0), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sign, %sub_1), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_2, %full_default), kwargs = {}) triton_poi_fused_abs_div_exp_lift_fresh_log1p_mul_sign_sub_0 = async_compile.triton('triton_poi_fused_abs_div_exp_lift_fresh_log1p_mul_sign_sub_0', ''' import triton import triton.language as tl 
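# Mu-law expansion with mu = 255: the encoded value is first rescaled to
# y in [-1, 1], then x = sign(y) * (exp(|y| * log(1 + mu)) - 1) / mu. The
# constants 0.00392156862745098 and 5.545177459716797 in the kernel below
# are 1/255 and log(256) in float32.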
from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_div_exp_lift_fresh_log1p_mul_sign_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_abs_div_exp_lift_fresh_log1p_mul_sign_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.00392156862745098 tmp2 = tmp0 * tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp5 = 1.0 tmp6 = tmp4 - tmp5 tmp7 = tl.full([1], 0, tl.int32) tmp8 = tmp7 < tmp6 tmp9 = tmp8.to(tl.int8) tmp10 = tmp6 < tmp7 tmp11 = tmp10.to(tl.int8) tmp12 = tmp9 - tmp11 tmp13 = tmp12.to(tmp6.dtype) tmp14 = tl_math.abs(tmp6) tmp15 = 5.545177459716797 tmp16 = tmp14 * tmp15 tmp17 = tl_math.exp(tmp16) tmp18 = tmp17 - tmp5 tmp19 = tmp13 * tmp18 tmp20 = tmp19 * tmp1 tl.store(out_ptr0 + (x0), tmp20, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mu, truediv, mul, x, sign, abs_1, log1p, mul_1, exp, sub_1, mul_2, x_1], Original ATen: [aten.lift_fresh, aten.div, aten.mul, aten.sub, aten.sign, aten.abs, aten.log1p, aten.exp] stream0 = get_raw_stream(0) triton_poi_fused_abs_div_exp_lift_fresh_log1p_mul_sign_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_abs_div_exp_lift_fresh_log1p_mul_sign_sub_0(in_ptr0,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.00392156862745098
    tmp2 = tmp0 * tmp1
    tmp3 = 2.0
    tmp4 = tmp2 * tmp3
    tmp5 = 1.0
    tmp6 = tmp4 - tmp5
    tmp7 = tl.full([1], 0, tl.int32)
    tmp8 = tmp7 < tmp6
    tmp9 = tmp8.to(tl.int8)
    tmp10 = tmp6 < tmp7
    tmp11 = tmp10.to(tl.int8)
    tmp12 = tmp9 - tmp11
    tmp13 = tmp12.to(tmp6.dtype)
    tmp14 = tl_math.abs(tmp6)
    tmp15 = 5.545177459716797
    tmp16 = tmp14 * tmp15
    tmp17 = tl_math.exp(tmp16)
    tmp18 = tmp17 - tmp5
    tmp19 = tmp13 * tmp18
    tmp20 = tmp19 * tmp1
    tl.store(out_ptr0 + x0, tmp20, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_abs_div_exp_lift_fresh_log1p_mul_sign_sub_0[grid(256)
            ](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class MuLawDecodingNew(torch.nn.Module):
    """Decode mu-law encoded signal.  For more info see the
    `Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_

    This expects an input with values between 0 and quantization_channels - 1
    and returns a signal scaled between -1 and 1.

    Args:
        quantization_channels (int, optional): Number of channels. (Default: ``256``)
    """
    __constants__ = ['quantization_channels']

    def __init__(self, quantization_channels: 'int'=256) ->None:
        super(MuLawDecodingNew, self).__init__()
        self.quantization_channels = quantization_channels

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
tbright17/audio
MuLawDecoding
false
10,917
[ "BSD-2-Clause" ]
0
00d38203e401b8d9472a8f8394a10e2c309be02c
https://github.com/tbright17/audio/tree/00d38203e401b8d9472a8f8394a10e2c309be02c
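A minimal sanity-check sketch for this record, assuming a CUDA device and the MuLawDecodingNew class above; mu_law_decode_reference is an illustrative eager re-derivation from the kernel's constants (0.00392156862745098 = 1/255, 5.545177459716797 = log1p(255)), not code from the repo.

import math
import torch


def mu_law_decode_reference(x, mu=255.0):
    # Rescale [0, mu] to [-1, 1], then invert the mu-law companding curve.
    x = x / mu * 2.0 - 1.0
    return torch.sign(x) * (torch.exp(torch.abs(x) * math.log1p(mu)) - 1.0) / mu


x = torch.rand(4, 4, 4, 4, device='cuda') * 255.0
decoded = MuLawDecodingNew(quantization_channels=256)(x)
assert torch.allclose(decoded, mu_law_decode_reference(x), atol=1e-4)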
Encoder_attention
import torch
import torch.nn as nn


class Encoder_attention(nn.Module):

    def __init__(self, n_h):
        super(Encoder_attention, self).__init__()
        self.linear = nn.Linear(n_h, 1)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Output: X
        """
        x1 = self.linear(x).squeeze()
        weights = self.softmax(x1).unsqueeze(2)
        x2 = torch.sum(torch.mul(x, weights), dim=1)
        return x2, weights.squeeze().clone().detach()


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'n_h': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x3, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 4
    x2 = xindex // 16
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x3, tmp8, xmask)


@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex // 16
    x3 = xindex % 16
    x0 = xindex % 4
    x4 = xindex
    tmp0 = tl.load(in_ptr0 + (x3 + 64 * x2), xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x3 + 64 * x2), xmask)
    tmp4 = tl.load(in_ptr1 + (4 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (32 + x3 + 64 * x2), xmask)
    tmp8 = tl.load(in_ptr1 + (8 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (48 + x3 + 64 * x2), xmask)
    tmp12 = tl.load(in_ptr1 + (12 + x0 + 16 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tl.store(out_ptr0 + x4, tmp14, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (1, 4), (4, 1))
    assert_size_stride(primals_2, (1,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 1), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_1
        del primals_2
        buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(64)](buf1, buf2, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
        del buf1
        triton_poi_fused__softmax_1[grid(64)](buf2, buf3, 64, XBLOCK=64,
            num_warps=1, num_stages=1)
        buf4 = buf2
        del buf2
        triton_poi_fused_mul_sum_2[grid(64)](primals_3, buf3, buf4, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
    return buf4, buf3, primals_3, buf3


class Encoder_attentionNew(nn.Module):

    def __init__(self, n_h):
        super(Encoder_attentionNew, self).__init__()
        self.linear = nn.Linear(n_h, 1)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, input_0):
        primals_1 = self.linear.weight
        primals_2 = self.linear.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0], output[1]
CrowdDynamicsLab/InfoMotif
Encoder_attention
false
17,177
[ "BSD-3-Clause" ]
7
cca1ffa14cc94408a5c4c50b7b1707c608e3bc9b
https://github.com/CrowdDynamicsLab/InfoMotif/tree/cca1ffa14cc94408a5c4c50b7b1707c608e3bc9b
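A minimal equivalence sketch, assuming a CUDA device and the two classes in this record; the weight sharing via load_state_dict is illustrative, not code from the repo.

import torch

eager = Encoder_attention(n_h=4).cuda()
fused = Encoder_attentionNew(n_h=4).cuda()
fused.load_state_dict(eager.state_dict())   # share the linear layer

x = torch.rand(4, 4, 4, 4, device='cuda')
(pooled_e, w_e), (pooled_f, w_f) = eager(x), fused(x)
assert torch.allclose(pooled_e, pooled_f, atol=1e-5)   # attention-pooled (4, 4, 4)
assert torch.allclose(w_e, w_f, atol=1e-5)             # softmax weights (4, 4, 4)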
BlendLinear
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_6/inductor_cache/fk/cfki6qg2njj4g7taj7ghaesj5brkn444q4jnoahj4egxmycjafqi.py
# Topologically Sorted Source Nodes: [sub, mul, add], Original ATen: [aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
#   add => add
#   mul => mul
#   sub => sub
# Graph fragment:
#   %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %view_1), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %primals_6), kwargs = {})
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %mul), kwargs = {})
triton_poi_fused_add_mul_sub_0 = async_compile.triton('triton_poi_fused_add_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x2), xmask)
    tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr3 + (x2), xmask)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp5 - tmp2
    tmp8 = tmp6 * tmp7
    tmp9 = tmp2 + tmp8
    tl.store(in_out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [sub, mul, add], Original ATen: [aten.sub, aten.mul, aten.add]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mul_sub_0.run(buf2, primals_2, buf1, primals_5, primals_6, 256, grid=grid(256), stream=stream0)
        del buf1
        del primals_2
        del primals_5
    return (buf2, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)
    tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr3 + x2, xmask)
    tmp2 = tmp0 + tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp5 - tmp2
    tmp8 = tmp6 * tmp7
    tmp9 = tmp2 + tmp8
    tl.store(in_out_ptr0 + x2, tmp9, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
        del primals_4
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        get_raw_stream(0)
        triton_poi_fused_add_mul_sub_0[grid(256)](buf2, primals_2, buf1,
            primals_5, primals_6, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del primals_2
        del primals_5
    return buf2, primals_6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)


class BlendLinearNew(nn.Module):

    def __init__(self, dim_in, dim_out, layer_type=nn.Linear, **unused_kwargs):
        super(BlendLinearNew, self).__init__()
        self._layer0 = layer_type(dim_in, dim_out)
        self._layer1 = layer_type(dim_in, dim_out)

    def forward(self, input_0, input_1):
        primals_1 = self._layer0.weight
        primals_2 = self._layer0.bias
        primals_4 = self._layer1.weight
        primals_5 = self._layer1.bias
        primals_3 = input_0
        primals_6 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
Justin-Tan/ffjord
BlendLinear
false
681
[ "MIT" ]
0
2caf8a4ff84933672fe0d94255d665b3dd7a6791
https://github.com/Justin-Tan/ffjord/tree/2caf8a4ff84933672fe0d94255d665b3dd7a6791
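A minimal sketch of what the fused kernel computes, assuming a CUDA device and the BlendLinearNew class above: a per-element interpolation y0 + (y1 - y0) * t between two Linear layers, which the eager reference below spells out for comparison.

import torch

blend = BlendLinearNew(dim_in=4, dim_out=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
t = torch.rand(4, 4, 4, 4, device='cuda')   # per-element blend factor

y = blend(x, t)
y0, y1 = blend._layer0(x), blend._layer1(x)
assert torch.allclose(y, y0 + (y1 - y0) * t, atol=1e-5)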
BertOutput
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()


# kernel path: runs/run_shard_1/inductor_cache/hk/chkxni526sa46elz3ztn5esaetlclniw4nbm3ra7wq5sova5mkd5.py
# Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
# Source node to ATen node mapping:
#   add => add
#   u => mean
# Graph fragment:
#   %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
triton_poi_fused_add_mean_0 = async_compile.triton('triton_poi_fused_add_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (0))
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (1))
    tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
    tmp10 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + (2))
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
    tmp17 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3))
    tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
    tmp24 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp3 = tmp0 + tmp2
    tmp5 = tmp3 + tmp4
    tmp9 = tmp6 + tmp8
    tmp11 = tmp9 + tmp10
    tmp12 = tmp5 + tmp11
    tmp16 = tmp13 + tmp15
    tmp18 = tmp16 + tmp17
    tmp19 = tmp12 + tmp18
    tmp23 = tmp20 + tmp22
    tmp25 = tmp23 + tmp24
    tmp26 = tmp19 + tmp25
    tmp27 = 4.0
    tmp28 = tmp26 / tmp27
    tl.store(out_ptr0 + (x0), tmp28, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/2a/c2atmp2erpyl7if7c6pxfntsmuvqm7hau7e33v4asqtjrqk6cihx.py
# Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
# Source node to ATen node mapping:
#   add => add
#   sub => sub
# Graph fragment:
#   %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_4), kwargs = {})
#   %sub : [num_users=3] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
triton_poi_fused_add_sub_1 = async_compile.triton('triton_poi_fused_add_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = (xindex // 4)
    tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (x2), xmask)
    tmp5 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 - tmp5
    tl.store(in_out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_1/inductor_cache/dk/cdkbz26wu3wbggculxlxtptc2kytrjlqq5ofmyrlm63hvygsm3da.py
# Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
#   add_1 => add_1
#   hidden_states_2 => add_2
#   mul => mul
#   pow_1 => pow_1
#   s => mean_1
#   sqrt => sqrt
#   x => div
# Graph fragment:
#   %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
#   %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
#   %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {})
#   %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
#   %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
#   %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %div), kwargs = {})
#   %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_6), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_2 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + (x2), xmask)
    tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp8 = tmp7 * tmp7
    tmp9 = tmp6 + tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = 4.0
    tmp14 = tmp12 / tmp13
    tmp15 = 1e-12
    tmp16 = tmp14 + tmp15
    tmp17 = libdevice.sqrt(tmp16)
    tmp18 = tmp1 / tmp17
    tmp19 = tmp0 * tmp18
    tmp21 = tmp19 + tmp20
    tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile

def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_5, (4, ), (1, ))
    assert_size_stride(primals_6, (4, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [], Original ATen: []
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        # Topologically Sorted Source Nodes: [add, u], Original ATen: [aten.add, aten.mean]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_mean_0.run(buf0, primals_2, primals_4, buf1, 64, grid=grid(64), stream=stream0)
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0  # reuse
        # Topologically Sorted Source Nodes: [add, sub], Original ATen: [aten.add, aten.sub]
        triton_poi_fused_add_sub_1.run(buf2, primals_2, primals_4, buf1, 256, grid=grid(256), stream=stream0)
        del buf1
        del primals_2
        del primals_4
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [pow_1, s, add_1, sqrt, x, mul, hidden_states_2], Original ATen: [aten.pow, aten.mean, aten.add, aten.sqrt, aten.div, aten.mul]
        triton_poi_fused_add_div_mean_mul_pow_sqrt_2.run(primals_5, buf2, primals_6, buf3, 256, grid=grid(256), stream=stream0)
        del primals_6
    return (buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_mean_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + 0)
    tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
    tmp4 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + 1)
    tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
    tmp10 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr1 + 2)
    tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
    tmp17 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + 3)
    tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
    tmp24 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp0 + tmp2
    tmp5 = tmp3 + tmp4
    tmp9 = tmp6 + tmp8
    tmp11 = tmp9 + tmp10
    tmp12 = tmp5 + tmp11
    tmp16 = tmp13 + tmp15
    tmp18 = tmp16 + tmp17
    tmp19 = tmp12 + tmp18
    tmp23 = tmp20 + tmp22
    tmp25 = tmp23 + tmp24
    tmp26 = tmp19 + tmp25
    tmp27 = 4.0
    tmp28 = tmp26 / tmp27
    tl.store(out_ptr0 + x0, tmp28, xmask)


@triton.jit
def triton_poi_fused_add_sub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    x1 = xindex // 4
    tmp0 = tl.load(in_out_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + x2, xmask)
    tmp5 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 - tmp5
    tl.store(in_out_ptr0 + x2, tmp6, xmask)


@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_2(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tmp2 * tmp2
    tmp5 = tmp4 * tmp4
    tmp6 = tmp3 + tmp5
    tmp8 = tmp7 * tmp7
    tmp9 = tmp6 + tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = 4.0
    tmp14 = tmp12 / tmp13
    tmp15 = 1e-12
    tmp16 = tmp14 + tmp15
    tmp17 = libdevice.sqrt(tmp16)
    tmp18 = tmp1 / tmp17
    tmp19 = tmp0 * tmp18
    tmp21 = tmp19 + tmp20
    tl.store(out_ptr0 + x2, tmp21, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_mean_0[grid(64)](buf0, primals_2, primals_4,
            buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        triton_poi_fused_add_sub_1[grid(256)](buf2, primals_2, primals_4,
            buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        del primals_2
        del primals_4
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_add_div_mean_mul_pow_sqrt_2[grid(256)](primals_5,
            buf2, primals_6, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_6
    return buf3, primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf2


class DecoderBertLayerNorm(nn.Module):

    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the
        square root).
        """
        super(DecoderBertLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x + self.bias


class BertOutputNew(nn.Module):

    def __init__(self, config):
        super(BertOutputNew, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = DecoderBertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_0, input_1):
        primals_1 = self.dense.weight
        primals_2 = self.dense.bias
        primals_5 = self.LayerNorm.weight
        primals_6 = self.LayerNorm.bias
        primals_3 = input_0
        primals_4 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0]
ArrowLuo/GRACE
BertOutput
false
8,774
[ "Apache-2.0" ]
17
f27b500ba905685c03eee6d91d87adc9ef78b4d1
https://github.com/ArrowLuo/GRACE/tree/f27b500ba905685c03eee6d91d87adc9ef78b4d1
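A minimal usage sketch, assuming a CUDA device and the BertOutputNew class above; SimpleNamespace is a hypothetical stand-in for the BERT-style config the constructor expects, and since no dropout op appears in the traced kernels, the fused path corresponds to eval()-mode behaviour.

import torch
from types import SimpleNamespace

config = SimpleNamespace(intermediate_size=4, hidden_size=4,
    hidden_dropout_prob=0.1)
layer = BertOutputNew(config).cuda().eval()

hidden_states = torch.rand(4, 4, 4, 4, device='cuda')
input_tensor = torch.rand(4, 4, 4, 4, device='cuda')
out = layer(hidden_states, input_tensor)   # dense -> add residual -> LayerNorm
print(out.shape)   # torch.Size([4, 4, 4, 4])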
Attention
import torch
import torch.nn.functional as F
import torch.nn as nn


def new_parameter(*size):
    out = nn.Parameter(torch.FloatTensor(*size))
    torch.nn.init.xavier_normal_(out)
    return out


class Attention(nn.Module):

    def __init__(self, attention_size):
        super(Attention, self).__init__()
        self.attention = new_parameter(attention_size, 1)

    def forward(self, x_in):
        attention_score = torch.matmul(x_in, self.attention).squeeze()
        attention_score = F.softmax(attention_score, dim=0).view(x_in.size(
            0), x_in.size(1), 1)
        scored_x = x_in * attention_score
        condensed_x = torch.sum(scored_x, dim=1)
        return condensed_x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'attention_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tmp9 = tl_math.exp(tmp8)
    tl.store(out_ptr0 + x2, tmp9, xmask)


@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = tmp3 + tmp4
    tmp7 = tmp5 + tmp6
    tmp8 = tmp0 / tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
    tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
    tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp5 = tmp3 * tmp4
    tmp6 = tmp2 + tmp5
    tmp9 = tmp7 * tmp8
    tmp10 = tmp6 + tmp9
    tmp13 = tmp11 * tmp12
    tmp14 = tmp10 + tmp13
    tl.store(out_ptr0 + x2, tmp14, xmask)


def call(args):
    primals_1, primals_2 = args
    args.clear()
    assert_size_stride(primals_1, (4, 1), (1, 1))
    assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
            primals_1, out=buf0)
        del primals_1
        buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__softmax_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused__softmax_1[grid(16)](buf1, buf2, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        buf3 = buf1
        del buf1
        triton_poi_fused_mul_sum_2[grid(16)](primals_2, buf2, buf3, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        del buf2
    return buf3, primals_2, buf0


def new_parameter(*size):
    out = nn.Parameter(torch.FloatTensor(*size))
    torch.nn.init.xavier_normal_(out)
    return out


class AttentionNew(nn.Module):

    def __init__(self, attention_size):
        super(AttentionNew, self).__init__()
        self.attention = new_parameter(attention_size, 1)

    def forward(self, input_0):
        primals_1 = self.attention
        primals_2 = input_0
        output = call([primals_1, primals_2])
        return output[0]
aaronbae/acl2020-dialogue-coherence-assessment
Attention
false
1,343
[ "MIT" ]
0
98142558b2f80ace390d6b583a3242a373803a85
https://github.com/aaronbae/acl2020-dialogue-coherence-assessment/tree/98142558b2f80ace390d6b583a3242a373803a85
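A minimal equivalence sketch, assuming a CUDA device and the two classes in this record; the weight copy via load_state_dict is illustrative, not code from the repo.

import torch

eager = Attention(attention_size=4).cuda()
fused = AttentionNew(attention_size=4).cuda()
fused.load_state_dict(eager.state_dict())   # share the attention vector

x = torch.rand(4, 4, 4, device='cuda')
assert torch.allclose(eager(x), fused(x), atol=1e-5)   # both pool to (4, 4)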
Iter_Downsample
import torch
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product


class Iter_Downsample(nn.Module):

    def __init__(self):
        super(Iter_Downsample, self).__init__()
        self.init_ds = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2,
            padding=0), nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
        self.ds1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.ds2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.ds3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.ds4 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        x = self.init_ds(x)
        x1 = self.ds1(x)
        x2 = self.ds2(x1)
        x3 = self.ds3(x2)
        x4 = self.ds4(x3)
        return x1, x2, x3, x4


def get_inputs():
    return [torch.rand([4, 4, 64, 64])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from math import sqrt as sqrt
from itertools import product as product
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 32
    x1 = xindex // 32
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tl.store(out_ptr0 + x2, tmp6, None)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 1296
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 9 % 9
    x0 = xindex % 9
    x2 = xindex // 81
    x4 = xindex
    tmp0 = -1 + 2 * x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 16, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x0
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (-17 + 2 * x0 + 32 * x1 + 256 * x2), tmp10 &
        xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp12 = 2 * x0
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (-16 + 2 * x0 + 32 * x1 + 256 * x2), tmp16 &
        xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 2 * x1
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp22 & tmp9
    tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 32 * x1 + 256 * x2), tmp23 &
        xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = tmp22 & tmp15
    tmp27 = tl.load(in_ptr0 + (2 * x0 + 32 * x1 + 256 * x2), tmp26 & xmask,
        eviction_policy='evict_last', other=float('-inf'))
    tmp28 = triton_helpers.maximum(tmp27, tmp25)
    tl.store(out_ptr0 + x4, tmp28, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x1 = xindex // 4 % 4
    x2 = xindex // 16
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (2 * x0 + 18 * x1 + 81 * x2), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 18 * x1 + 81 * x2), xmask,
        eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (9 + 2 * x0 + 18 * x1 + 81 * x2), xmask,
        eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (10 + 2 * x0 + 18 * x1 + 81 * x2), xmask,
        eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tl.store(out_ptr0 + x3, tmp6, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 144
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 3 % 3
    x0 = xindex % 3
    x2 = xindex // 9
    x4 = xindex
    tmp0 = -1 + 2 * x1
    tmp1 = tl.full([1], 0, tl.int64)
    tmp2 = tmp0 >= tmp1
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tmp2 & tmp4
    tmp6 = -1 + 2 * x0
    tmp7 = tmp6 >= tmp1
    tmp8 = tmp6 < tmp3
    tmp9 = tmp7 & tmp8
    tmp10 = tmp5 & tmp9
    tmp11 = tl.load(in_ptr0 + (-5 + 2 * x0 + 8 * x1 + 16 * x2), tmp10 &
        xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp12 = 2 * x0
    tmp13 = tmp12 >= tmp1
    tmp14 = tmp12 < tmp3
    tmp15 = tmp13 & tmp14
    tmp16 = tmp5 & tmp15
    tmp17 = tl.load(in_ptr0 + (-4 + 2 * x0 + 8 * x1 + 16 * x2), tmp16 &
        xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp18 = triton_helpers.maximum(tmp17, tmp11)
    tmp19 = 2 * x1
    tmp20 = tmp19 >= tmp1
    tmp21 = tmp19 < tmp3
    tmp22 = tmp20 & tmp21
    tmp23 = tmp22 & tmp9
    tmp24 = tl.load(in_ptr0 + (-1 + 2 * x0 + 8 * x1 + 16 * x2), tmp23 &
        xmask, eviction_policy='evict_last', other=float('-inf'))
    tmp25 = triton_helpers.maximum(tmp24, tmp18)
    tmp26 = tmp22 & tmp15
    tmp27 = tl.load(in_ptr0 + (2 * x0 + 8 * x1 + 16 * x2), tmp26 & xmask,
        eviction_policy='evict_last', other=float('-inf'))
    tmp28 = triton_helpers.maximum(tmp27, tmp25)
    tl.store(out_ptr0 + x4, tmp28, xmask)


@triton.jit
def triton_poi_fused_max_pool2d_with_indices_5(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 9 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 9 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (3 + 9 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (4 + 9 * x0), xmask, eviction_policy='evict_last')
    tmp2 = triton_helpers.maximum(tmp1, tmp0)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp6 = triton_helpers.maximum(tmp5, tmp4)
    tl.store(out_ptr0 + x0, tmp6, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1),
            torch.float32)
        get_raw_stream(0)
        triton_poi_fused_max_pool2d_with_indices_0[grid(16384)](arg0_1,
            buf0, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4, 4, 16, 16), (1024, 256, 16, 1),
            torch.float32)
        triton_poi_fused_max_pool2d_with_indices_1[grid(4096)](buf0, buf1,
            4096, XBLOCK=128, num_warps=4, num_stages=1)
        del buf0
        buf2 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
        triton_poi_fused_max_pool2d_with_indices_2[grid(1296)](buf1, buf2,
            1296, XBLOCK=128, num_warps=4, num_stages=1)
        del buf1
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_max_pool2d_with_indices_3[grid(256)](buf2, buf3,
            256, XBLOCK=128, num_warps=4, num_stages=1)
        buf4 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
        triton_poi_fused_max_pool2d_with_indices_4[grid(144)](buf3, buf4,
            144, XBLOCK=256, num_warps=4, num_stages=1)
        buf5 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        triton_poi_fused_max_pool2d_with_indices_5[grid(16)](buf4, buf5,
            16, XBLOCK=16, num_warps=1, num_stages=1)
    return buf2, buf3, buf4, buf5


class Iter_DownsampleNew(nn.Module):

    def __init__(self):
        super(Iter_DownsampleNew, self).__init__()
        self.init_ds = nn.Sequential(nn.MaxPool2d(kernel_size=2, stride=2,
            padding=0), nn.MaxPool2d(kernel_size=2, stride=2, padding=0))
        self.ds1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.ds2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.ds3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.ds4 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0], output[1], output[2], output[3]
vaesl/LFIP
Iter_Downsample
false
16,659
[ "MIT" ]
59
eb9d934616c508c9a9032f170baa1d97fa792822
https://github.com/vaesl/LFIP/tree/eb9d934616c508c9a9032f170baa1d97fa792822
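A minimal shape sketch, assuming a CUDA device and the Iter_DownsampleNew class above; the sizes follow from the MaxPool2d arithmetic (64 -> 16 inside init_ds, then the ds1..ds4 chain whose padding alternates 1/0) and match the buffer shapes in call().

import torch

x = torch.rand(4, 4, 64, 64, device='cuda')
x1, x2, x3, x4 = Iter_DownsampleNew()(x)
print(x1.shape, x2.shape, x3.shape, x4.shape)
# torch.Size([4, 4, 9, 9]) torch.Size([4, 4, 4, 4])
# torch.Size([4, 4, 3, 3]) torch.Size([4, 4, 1, 1])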
SelfGating
import torch
from torch import nn
import torch as th
import torch.hub
import torch.utils.data


class SelfGating(nn.Module):

    def __init__(self, input_dim):
        super(SelfGating, self).__init__()
        self.fc = nn.Linear(input_dim, input_dim)

    def forward(self, input_tensor):
        """Feature gating as used in S3D-G.
        """
        spatiotemporal_average = th.mean(input_tensor, dim=[2, 3, 4])
        weights = self.fc(spatiotemporal_average)
        weights = th.sigmoid(weights)
        return weights[:, :, None, None, None] * input_tensor


def get_inputs():
    return [torch.rand([4, 4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'input_dim': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.hub
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
    tl.constexpr):
    xnumel = 16
    RBLOCK: tl.constexpr = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)
    r1 = rindex
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(xmask, tmp1, 0)
    tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 64.0
    tmp6 = tmp4 / tmp5
    tl.debug_barrier()
    tl.store(in_out_ptr0 + x0, tmp6, xmask)


@triton.jit
def triton_poi_fused_mul_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
    constexpr):
    xnumel = 1024
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 64
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr1 + x2, xmask)
    tmp1 = tl.sigmoid(tmp0)
    tmp3 = tmp1 * tmp2
    tl.store(out_ptr0 + x2, tmp3, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4), (4, 1))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 64, XBLOCK=1,
            num_warps=2, num_stages=1)
        buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_3, buf1, reinterpret_tensor(primals_2,
            (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
        del primals_2
        del primals_3
        buf3 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_mul_1[grid(1024)](buf2, primals_1, buf3, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
    return buf3, primals_1, buf1, buf2


class SelfGatingNew(nn.Module):

    def __init__(self, input_dim):
        super(SelfGatingNew, self).__init__()
        self.fc = nn.Linear(input_dim, input_dim)

    def forward(self, input_0):
        primals_2 = self.fc.weight
        primals_3 = self.fc.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
nicholasneo78/wav2vec-demo
SelfGating
false
12832
[ "MIT" ]
0
c37db7b8211458dc810a85d4262ef41e3e3e4f12
https://github.com/nicholasneo78/wav2vec-demo/tree/c37db7b8211458dc810a85d4262ef41e3e3e4f12
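A minimal eager-mode walkthrough of the fusion above, assuming the (B, C, T, H, W) input shape from get_inputs(): triton_per_fused_mean_0 is the spatiotemporal mean, the extern addmm is the nn.Linear, and triton_poi_fused_mul_1 folds the sigmoid into the broadcasted multiply.

import torch
from torch import nn

fc = nn.Linear(4, 4)                          # stands in for self.fc in SelfGating
x = torch.rand(4, 4, 4, 4, 4)
avg = x.mean(dim=[2, 3, 4])                   # (B, C)          <- triton_per_fused_mean_0
weights = torch.sigmoid(fc(avg))              # extern addmm; sigmoid fused into the mul kernel
out = weights[:, :, None, None, None] * x     # (B, C, T, H, W) <- triton_poi_fused_mul_1
print(out.shape)                              # torch.Size([4, 4, 4, 4, 4])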
AdaptiveFeatureNorm
import torch class AdaptiveFeatureNorm(torch.nn.Module): """ Implementation of the loss in [Larger Norm More Transferable: An Adaptive Feature Norm Approach for Unsupervised Domain Adaptation](https://arxiv.org/abs/1811.07456). Encourages features to gradually have larger and larger L2 norms. """ def __init__(self, step_size: 'float'=1): """ Arguments: step_size: The desired increase in L2 norm at each iteration. Note that the loss will always be equal to ```step_size``` because the goal is always to make the L2 norm ```step_size``` larger than whatever the current L2 norm is. """ super().__init__() self.step_size = step_size def forward(self, x): """""" l2_norm = x.norm(p=2, dim=1) radius = l2_norm.detach() + self.step_size return torch.mean((l2_norm - radius) ** 2) def extra_repr(self): """""" return c_f.extra_repr(self, ['step_size']) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp12 = 1.0 tmp13 = tmp11 + tmp12 tmp14 = tmp11 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK]) tmp18 = tl.sum(tmp16, 1)[:, None] tmp19 = 64.0 tmp20 = tmp18 / tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0[grid(1)](buf1, arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 return buf1, class AdaptiveFeatureNormNew(torch.nn.Module): """ Implementation of the loss in [Larger Norm More Transferable: An Adaptive Feature Norm Approach for Unsupervised Domain Adaptation](https://arxiv.org/abs/1811.07456). Encourages features to gradually have larger and larger L2 norms. """ def __init__(self, step_size: 'float'=1): """ Arguments: step_size: The desired increase in L2 norm at each iteration. Note that the loss will always be equal to ```step_size``` because the goal is always to make the L2 norm ```step_size``` larger than whatever the current L2 norm is. """ super().__init__() self.step_size = step_size def extra_repr(self): """""" return c_f.extra_repr(self, ['step_size']) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
KevinMusgrave/pytorch-adapt
AdaptiveFeatureNorm
false
13944
[ "MIT" ]
131
ff1491e1bfcc586afb8ee619712c8816ddf10358
https://github.com/KevinMusgrave/pytorch-adapt/tree/ff1491e1bfcc586afb8ee619712c8816ddf10358
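Both copies of AdaptiveFeatureNorm above reference c_f in extra_repr without importing it (in the source repo this is presumably its common-functions utility module), so calling repr on the module as recorded would raise a NameError. Below is a self-contained stand-in with the same forward and a plain extra_repr. Note the returned value is the constant step_size ** 2 (which coincides with step_size at the default of 1), while the gradient still pushes every norm upward because radius is detached.

import torch

class AdaptiveFeatureNormFixed(torch.nn.Module):
    # Same forward as the record above; extra_repr rewritten to drop the c_f dependency.
    def __init__(self, step_size: float = 1):
        super().__init__()
        self.step_size = step_size

    def forward(self, x):
        l2_norm = x.norm(p=2, dim=1)
        radius = l2_norm.detach() + self.step_size   # always step_size above the current norm
        return torch.mean((l2_norm - radius) ** 2)

    def extra_repr(self):
        return f'step_size={self.step_size}'

x = torch.rand(4, 4, 4, 4, requires_grad=True)
loss = AdaptiveFeatureNormFixed()(x)
loss.backward()
print(float(loss), x.grad.abs().sum() > 0)   # 1.0 (= step_size ** 2), tensor(True)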
MaskL1Loss
import torch import torch.nn as nn class MaskL1Loss(nn.Module): """ Loss from paper <Pose Guided Person Image Generation> Sec3.1 pose mask loss """ def __init__(self, ratio=1): super(MaskL1Loss, self).__init__() self.criterion = nn.L1Loss() self.ratio = ratio def forward(self, generated_img, target_img, mask): pose_mask_l1 = self.criterion(generated_img * mask, target_img * mask) return self.criterion(generated_img, target_img ) + pose_mask_l1 * self.ratio def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_abs_add_mean_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp7 = tl.load(in_ptr2 + r0, None) tmp2 = tmp0 - tmp1 tmp3 = tl_math.abs(tmp2) tmp4 = tl.broadcast_to(tmp3, [RBLOCK]) tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0)) tmp8 = tmp0 * tmp7 tmp9 = tmp1 * tmp7 tmp10 = tmp8 - tmp9 tmp11 = tl_math.abs(tmp10) tmp12 = tl.broadcast_to(tmp11, [RBLOCK]) tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0)) tmp15 = 256.0 tmp16 = tmp6 / tmp15 tmp17 = tmp14 / tmp15 tmp18 = 1.0 tmp19 = tmp17 * tmp18 tmp20 = tmp16 + tmp19 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_per_fused_abs_add_mean_mul_sub_0[grid(1)](buf2, arg0_1, arg2_1, arg1_1, 1, 256, num_warps=2, num_stages=1) del arg0_1 del arg1_1 del arg2_1 return buf2, class MaskL1LossNew(nn.Module): """ Loss from paper <Pose Guided Person Image Generation> Sec3.1 pose mask loss """ def __init__(self, ratio=1): super(MaskL1LossNew, self).__init__() self.criterion = nn.L1Loss() self.ratio = ratio def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
pasan1992/Human-Pose-Transfer
MaskL1Loss
false
16231
[ "MIT" ]
64
a7febc632d4fbf627ba05740d2048accb25575f2
https://github.com/pasan1992/Human-Pose-Transfer/tree/a7febc632d4fbf627ba05740d2048accb25575f2
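The single reduction kernel above produces both L1 terms in one pass over the 256 elements, expanding (g - t) * m into g * m - t * m (tmp8 - tmp9). A minimal eager reference with the default ratio = 1:

import torch
import torch.nn as nn

g, t, m = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
crit = nn.L1Loss()
loss = crit(g, t) + crit(g * m, t * m) * 1.0                  # plain L1 + pose-mask L1
fused = (g - t).abs().mean() + (g * m - t * m).abs().mean()   # what the kernel computes
print(torch.allclose(loss, fused))                            # True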
BinaryFocalLossWithLogits
import torch import warnings from typing import Optional import torch.nn as nn import torch.nn.functional as F def binary_focal_loss_with_logits(input: 'torch.Tensor', target: 'torch.Tensor', alpha: 'float'=0.25, gamma: 'float'=2.0, reduction: 'str'='none', eps: 'Optional[float]'=None) ->torch.Tensor: """Function that computes Binary Focal loss. .. math:: \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t) where: - :math:`p_t` is the model's estimated probability for each class. Args: input: input data tensor of arbitrary shape. target: the target tensor with shape matching input. alpha: Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`. gamma: Focusing parameter :math:`\\gamma >= 0`. reduction: Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. eps: Deprecated: scalar for numerically stability when dividing. This is no longer used. Returns: the computed loss. Examples: >>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'} >>> logits = torch.tensor([[[6.325]],[[5.26]],[[87.49]]]) >>> labels = torch.tensor([[[1.]],[[1.]],[[0.]]]) >>> binary_focal_loss_with_logits(logits, labels, **kwargs) tensor(21.8725) """ if eps is not None and not torch.jit.is_scripting(): warnings.warn( '`binary_focal_loss_with_logits` has been reworked for improved numerical stability and the `eps` argument is no longer necessary' , DeprecationWarning, stacklevel=2) if not isinstance(input, torch.Tensor): raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}') if not len(input.shape) >= 2: raise ValueError( f'Invalid input shape, we expect BxCx*. Got: {input.shape}') if input.size(0) != target.size(0): raise ValueError( f'Expected input batch_size ({input.size(0)}) to match target batch_size ({target.size(0)}).' ) probs_pos = torch.sigmoid(input) probs_neg = torch.sigmoid(-input) loss_tmp = -alpha * torch.pow(probs_neg, gamma) * target * F.logsigmoid( input) - (1 - alpha) * torch.pow(probs_pos, gamma) * (1.0 - target ) * F.logsigmoid(-input) if reduction == 'none': loss = loss_tmp elif reduction == 'mean': loss = torch.mean(loss_tmp) elif reduction == 'sum': loss = torch.sum(loss_tmp) else: raise NotImplementedError(f'Invalid reduction mode: {reduction}') return loss class BinaryFocalLossWithLogits(nn.Module): """Criterion that computes Focal loss. According to :cite:`lin2018focal`, the Focal loss is computed as follows: .. math:: \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t) where: - :math:`p_t` is the model's estimated probability for each class. Args: alpha): Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`. gamma: Focusing parameter :math:`\\gamma >= 0`. reduction: Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Shape: - Input: :math:`(N, *)`. - Target: :math:`(N, *)`. 
Examples: >>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'} >>> loss = BinaryFocalLossWithLogits(**kwargs) >>> input = torch.randn(1, 3, 5, requires_grad=True) >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(2) >>> output = loss(input, target) >>> output.backward() """ def __init__(self, alpha: 'float', gamma: 'float'=2.0, reduction: 'str' ='none') ->None: super().__init__() self.alpha: 'float' = alpha self.gamma: 'float' = gamma self.reduction: 'str' = reduction def forward(self, input: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: return binary_focal_loss_with_logits(input, target, self.alpha, self.gamma, self.reduction) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'alpha': 4}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import warnings from typing import Optional import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_log_sigmoid_forward_mul_neg_pow_rsub_sigmoid_sub_0(in_ptr0 , in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp6 = tl.load(in_ptr1 + x0, xmask) tmp1 = -tmp0 tmp2 = tl.sigmoid(tmp1) tmp3 = tmp2 * tmp2 tmp4 = -4.0 tmp5 = tmp3 * tmp4 tmp7 = tmp5 * tmp6 tmp8 = 0.0 tmp9 = triton_helpers.minimum(tmp8, tmp0) tmp10 = tl_math.abs(tmp0) tmp11 = -tmp10 tmp12 = tl_math.exp(tmp11) tmp13 = libdevice.log1p(tmp12) tmp14 = tmp9 - tmp13 tmp15 = tmp7 * tmp14 tmp16 = tl.sigmoid(tmp0) tmp17 = tmp16 * tmp16 tmp18 = -3.0 tmp19 = tmp17 * tmp18 tmp20 = 1.0 tmp21 = tmp20 - tmp6 tmp22 = tmp19 * tmp21 tmp23 = triton_helpers.minimum(tmp8, tmp1) tmp24 = tl_math.abs(tmp1) tmp25 = -tmp24 tmp26 = tl_math.exp(tmp25) tmp27 = libdevice.log1p(tmp26) tmp28 = tmp23 - tmp27 tmp29 = tmp22 * tmp28 tmp30 = tmp15 - tmp29 tl.store(out_ptr0 + x0, tmp30, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_log_sigmoid_forward_mul_neg_pow_rsub_sigmoid_sub_0[ grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf0, def binary_focal_loss_with_logits(input: 'torch.Tensor', target: 'torch.Tensor', alpha: 'float'=0.25, gamma: 'float'=2.0, reduction: 'str'='none', eps: 'Optional[float]'=None) ->torch.Tensor: """Function that computes Binary Focal loss. .. math:: \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t) where: - :math:`p_t` is the model's estimated probability for each class. Args: input: input data tensor of arbitrary shape. target: the target tensor with shape matching input. alpha: Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`. gamma: Focusing parameter :math:`\\gamma >= 0`. reduction: Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. eps: Deprecated: scalar for numerically stability when dividing. This is no longer used. Returns: the computed loss. 
Examples: >>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'} >>> logits = torch.tensor([[[6.325]],[[5.26]],[[87.49]]]) >>> labels = torch.tensor([[[1.]],[[1.]],[[0.]]]) >>> binary_focal_loss_with_logits(logits, labels, **kwargs) tensor(21.8725) """ if eps is not None and not torch.jit.is_scripting(): warnings.warn( '`binary_focal_loss_with_logits` has been reworked for improved numerical stability and the `eps` argument is no longer necessary' , DeprecationWarning, stacklevel=2) if not isinstance(input, torch.Tensor): raise TypeError(f'Input type is not a torch.Tensor. Got {type(input)}') if not len(input.shape) >= 2: raise ValueError( f'Invalid input shape, we expect BxCx*. Got: {input.shape}') if input.size(0) != target.size(0): raise ValueError( f'Expected input batch_size ({input.size(0)}) to match target batch_size ({target.size(0)}).' ) probs_pos = torch.sigmoid(input) probs_neg = torch.sigmoid(-input) loss_tmp = -alpha * torch.pow(probs_neg, gamma) * target * F.logsigmoid( input) - (1 - alpha) * torch.pow(probs_pos, gamma) * (1.0 - target ) * F.logsigmoid(-input) if reduction == 'none': loss = loss_tmp elif reduction == 'mean': loss = torch.mean(loss_tmp) elif reduction == 'sum': loss = torch.sum(loss_tmp) else: raise NotImplementedError(f'Invalid reduction mode: {reduction}') return loss class BinaryFocalLossWithLogitsNew(nn.Module): """Criterion that computes Focal loss. According to :cite:`lin2018focal`, the Focal loss is computed as follows: .. math:: \\text{FL}(p_t) = -\\alpha_t (1 - p_t)^{\\gamma} \\, \\text{log}(p_t) where: - :math:`p_t` is the model's estimated probability for each class. Args: alpha): Weighting factor for the rare class :math:`\\alpha \\in [0, 1]`. gamma: Focusing parameter :math:`\\gamma >= 0`. reduction: Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Shape: - Input: :math:`(N, *)`. - Target: :math:`(N, *)`. Examples: >>> kwargs = {"alpha": 0.25, "gamma": 2.0, "reduction": 'mean'} >>> loss = BinaryFocalLossWithLogits(**kwargs) >>> input = torch.randn(1, 3, 5, requires_grad=True) >>> target = torch.empty(1, 3, 5, dtype=torch.long).random_(2) >>> output = loss(input, target) >>> output.backward() """ def __init__(self, alpha: 'float', gamma: 'float'=2.0, reduction: 'str' ='none') ->None: super().__init__() self.alpha: 'float' = alpha self.gamma: 'float' = gamma self.reduction: 'str' = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
YanivHollander/kornia
BinaryFocalLossWithLogits
false
14631
[ "ECL-2.0", "Apache-2.0" ]
418
ccd258d0956da89b1feca96448eff8e4969d405a
https://github.com/YanivHollander/kornia/tree/ccd258d0956da89b1feca96448eff8e4969d405a
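The fused kernel bakes the get_init_inputs() arguments into constants: alpha = 4 shows up as -4.0 (= -alpha) and -3.0 (= -(1 - alpha)), pow(., gamma) for the default gamma = 2 becomes a squared sigmoid (tmp3, tmp17), and F.logsigmoid is expanded into its numerically stable form min(0, x) - log1p(exp(-|x|)) (tmp9 - tmp13). A minimal eager reference with reduction='none':

import torch
import torch.nn.functional as F

alpha, gamma = 4.0, 2.0                       # from get_init_inputs()
x, y = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
loss = (-alpha * torch.sigmoid(-x).pow(gamma) * y * F.logsigmoid(x)
        - (1 - alpha) * torch.sigmoid(x).pow(gamma) * (1.0 - y) * F.logsigmoid(-x))
print(loss.shape)                             # torch.Size([4, 4, 4, 4]), one value per element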
AdaIN
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/r7/cr7cmxjaiaafsi7ahl4uf53amkxiklgvjgsnuhsp2vdaaoeplj6o.py # Topologically Sorted Source Nodes: [sub, normalized, mul, add_2], Original ATen: [aten.sub, aten.div, aten.mul, aten.add] # Source node to ATen node mapping: # add_2 => add_2 # mul => mul # normalized => div # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %expand), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %expand_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %expand_2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %expand_3), kwargs = {}) triton_poi_fused_add_div_mul_sub_0 = async_compile.triton('triton_poi_fused_add_div_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr1 + (x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr1 + (16 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr1 + (32 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x1 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp10 / tmp26 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp34 = tmp32 + tmp33 tmp35 = tmp34 / tmp8 tmp36 = tmp28 - tmp35 tmp37 = tmp36 * tmp36 tmp38 = tmp29 - tmp35 tmp39 = tmp38 * tmp38 tmp40 = tmp37 + tmp39 tmp41 = tmp31 - tmp35 tmp42 = tmp41 * tmp41 tmp43 = tmp40 + tmp42 tmp44 = tmp33 - tmp35 tmp45 = tmp44 * tmp44 tmp46 = tmp43 + tmp45 tmp47 = tmp46 / tmp22 tmp48 = tmp47 + tmp24 tmp49 = libdevice.sqrt(tmp48) tmp50 = tmp27 * tmp49 tmp51 = tmp50 + tmp35 tl.store(in_out_ptr0 + (x3), tmp51, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [sub, normalized, mul, add_2], Original ATen: [aten.sub, aten.div, aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0.run(buf1, arg0_1, arg1_1, 256, grid=grid(256), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr0 + (16 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr0 + (32 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (48 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr1 + (x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr1 + (16 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp31 = tl.load(in_ptr1 + (32 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp33 = tl.load(in_ptr1 + (48 + x1 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = 4.0 tmp9 = tmp7 / tmp8 tmp10 = tmp0 - tmp9 tmp11 = tmp1 - tmp9 tmp12 = tmp11 * tmp11 tmp13 = tmp2 - tmp9 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp16 = tmp4 - tmp9 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp6 - tmp9 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = 3.0 tmp23 = tmp21 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.sqrt(tmp25) tmp27 = tmp10 / tmp26 tmp30 = tmp28 + tmp29 tmp32 = tmp30 + tmp31 tmp34 = tmp32 + tmp33 tmp35 = tmp34 / tmp8 tmp36 = tmp28 - tmp35 tmp37 = tmp36 * tmp36 tmp38 = tmp29 - tmp35 tmp39 = tmp38 * tmp38 tmp40 = tmp37 + tmp39 tmp41 = tmp31 - tmp35 tmp42 = tmp41 * tmp41 tmp43 = tmp40 + tmp42 tmp44 = tmp33 - tmp35 tmp45 = tmp44 * tmp44 tmp46 = tmp43 + tmp45 tmp47 = tmp46 / tmp22 tmp48 = tmp47 + tmp24 tmp49 = libdevice.sqrt(tmp48) tmp50 = tmp27 * tmp49 tmp51 = tmp50 + tmp35 tl.store(in_out_ptr0 + x3, tmp51, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_div_mul_sub_0[grid(256)](buf1, arg0_1, arg1_1, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 return buf1, class AdaINNew(torch.nn.Module): def __init__(self, epsilon: 'float'=1e-05): super(AdaINNew, self).__init__() self.epsilon = epsilon def calc_vector_mean_std(self, x): std = torch.sqrt(torch.var(x, dim=1) + self.epsilon) mean = torch.mean(x, dim=1) return mean, std def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
ai-in-motion/moai
AdaIN
false
18311
[ "Apache-2.0" ]
10
e38cac046c059d2e2331ef4883bbabc5a500a5cf
https://github.com/ai-in-motion/moai/tree/e38cac046c059d2e2331ef4883bbabc5a500a5cf
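This record's original column is itself an inductor dump, so the eager forward is not shown; only calc_vector_mean_std survives in AdaINNew. Below is a minimal sketch of the intended (B, C) vector case, inferred from that helper and the sub/div/mul/add graph fragment; the forward body is an assumption. The divide-by-3 in the kernel (tmp22) is the unbiased N - 1 denominator that torch.var uses by default.

import torch

def adain_vectors(content, style, epsilon=1e-05):
    # Renormalize each content vector to the style vector's statistics.
    c_mean, c_std = content.mean(dim=1), torch.sqrt(content.var(dim=1) + epsilon)
    s_mean, s_std = style.mean(dim=1), torch.sqrt(style.var(dim=1) + epsilon)
    normalized = (content - c_mean[:, None]) / c_std[:, None]
    return normalized * s_std[:, None] + s_mean[:, None]

out = adain_vectors(torch.rand(4, 4), torch.rand(4, 4))
print(out.shape)   # torch.Size([4, 4])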
ParityPonderGRU
from torch.nn import Module import torch from torch import nn from typing import Tuple import torch.utils.data import torch.nn.functional import torch.autograd class ParityPonderGRU(Module): """ ## PonderNet with GRU for Parity Task This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html) as the step function. This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`. Each element of the vector is either `0`, `1` or `-1` and the output is the parity - a binary value that is true if the number of `1`s is odd and false otherwise. The prediction of the model is the log probability of the parity being $1$. """ def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'): """ * `n_elems` is the number of elements in the input vector * `n_hidden` is the state vector size of the GRU * `max_steps` is the maximum number of steps $N$ """ super().__init__() self.max_steps = max_steps self.n_hidden = n_hidden self.gru = nn.GRUCell(n_elems, n_hidden) self.output_layer = nn.Linear(n_hidden, 1) self.lambda_layer = nn.Linear(n_hidden, 1) self.lambda_prob = nn.Sigmoid() self.is_halt = False def forward(self, x: 'torch.Tensor') ->Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """ * `x` is the input of shape `[batch_size, n_elems]` This outputs a tuple of four tensors: 1. $p_1 \\dots p_N$ in a tensor of shape `[N, batch_size]` 2. $\\hat{y}_1 \\dots \\hat{y}_N$ in a tensor of shape `[N, batch_size]` - the log probabilities of the parity being $1$ 3. $p_m$ of shape `[batch_size]` 4. $\\hat{y}_m$ of shape `[batch_size]` where the computation was halted at step $m$ """ batch_size = x.shape[0] h = x.new_zeros((x.shape[0], self.n_hidden)) h = self.gru(x, h) p = [] y = [] un_halted_prob = h.new_ones((batch_size,)) halted = h.new_zeros((batch_size,)) p_m = h.new_zeros((batch_size,)) y_m = h.new_zeros((batch_size,)) for n in range(1, self.max_steps + 1): if n == self.max_steps: lambda_n = h.new_ones(h.shape[0]) else: lambda_n = self.lambda_prob(self.lambda_layer(h))[:, 0] y_n = self.output_layer(h)[:, 0] p_n = un_halted_prob * lambda_n un_halted_prob = un_halted_prob * (1 - lambda_n) halt = torch.bernoulli(lambda_n) * (1 - halted) p.append(p_n) y.append(y_n) p_m = p_m * (1 - halt) + p_n * halt y_m = y_m * (1 - halt) + y_n * halt halted = halted + halt h = self.gru(x, h) if self.is_halt and halted.sum() == batch_size: break return torch.stack(p), torch.stack(y), p_m, y_m def get_inputs(): return [torch.rand([4, 4])] def get_init_inputs(): return [[], {'n_elems': 4, 'n_hidden': 4, 'max_steps': 4}]
import torch from torch import device from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch.nn import Module from torch import nn import torch.utils.data import torch.nn.functional import torch.autograd assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_new_zeros_0(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + x0, tmp0, xmask) @triton.jit def triton_poi_fused_stack_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 28 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + x0, tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + x0, tmp14 & xmask, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 4, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + x0, tmp19 & xmask, eviction_policy= 'evict_last', other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 5, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr1 + x0, tmp24 & xmask, eviction_policy= 'evict_last', other=0.0) tmp26 = tmp0 >= tmp22 tmp27 = tl.full([1], 6, tl.int64) tmp28 = tmp0 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tl.load(in_ptr1 + x0, tmp29 & xmask, eviction_policy= 'evict_last', other=0.0) tmp31 = tmp0 >= tmp27 tl.full([1], 7, tl.int64) tmp34 = tl.load(in_ptr1 + x0, tmp31 & xmask, eviction_policy= 'evict_last', other=0.0) tmp35 = tl.where(tmp29, tmp30, tmp34) tmp36 = tl.where(tmp24, tmp25, tmp35) tmp37 = tl.where(tmp19, tmp20, tmp36) tmp38 = tl.where(tmp14, tmp15, tmp37) tmp39 = tl.where(tmp9, tmp10, tmp38) tmp40 = tl.where(tmp4, tmp5, tmp39) tl.store(out_ptr0 + x2, tmp40, xmask) @triton.jit def triton_poi_fused_stack_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 7 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp23 = tl.load(in_ptr1 + 0) tmp24 = tl.broadcast_to(tmp23, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = tmp0 >= tmp3 tmp8 = tl.full([1], 2, tl.int64) tmp9 = tmp0 < tmp8 tmp10 = tmp7 & tmp9 tmp11 = tmp0 >= tmp8 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tmp0 >= tmp12 tmp16 = tl.full([1], 4, tl.int64) tmp17 = tmp0 < tmp16 tmp18 = tmp15 & tmp17 tmp19 = tmp0 >= tmp16 tmp20 = tl.full([1], 5, tl.int64) tmp21 = tmp0 < tmp20 tmp22 = tmp19 & tmp21 tmp25 = tmp0 >= tmp20 tmp26 = tl.full([1], 6, tl.int64) tmp27 = tmp0 < tmp26 tmp28 = tmp25 & tmp27 tl.full([1], 7, 
tl.int64) tmp32 = tl.where(tmp28, tmp24, tmp24) tmp33 = tl.where(tmp22, tmp24, tmp32) tmp34 = tl.where(tmp18, tmp6, tmp33) tmp35 = tl.where(tmp14, tmp6, tmp34) tmp36 = tl.where(tmp10, tmp6, tmp35) tmp37 = tl.where(tmp4, tmp6, tmp36) tl.store(out_ptr0 + x0, tmp37, xmask) @triton.jit def triton_poi_fused_stack_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 112 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4 * (-4 + x1)), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 4 * (-8 + x1)), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr3 + (x0 + 4 * (-12 + x1)), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (x0 + 4 * (-16 + x1)), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tmp27 = tl.full([1], 24, tl.int64) tmp28 = tmp0 < tmp27 tmp29 = tmp26 & tmp28 tmp30 = tl.load(in_ptr1 + (x0 + 4 * (-20 + x1)), tmp29 & xmask, other=0.0) tmp31 = tmp0 >= tmp27 tl.full([1], 28, tl.int64) tmp34 = tl.load(in_ptr2 + (x0 + 4 * (-24 + x1)), tmp31 & xmask, other=0.0) tmp35 = tl.where(tmp29, tmp30, tmp34) tmp36 = tl.where(tmp24, tmp25, tmp35) tmp37 = tl.where(tmp19, tmp20, tmp36) tmp38 = tl.where(tmp14, tmp15, tmp37) tmp39 = tl.where(tmp9, tmp10, tmp38) tmp40 = tl.where(tmp4, tmp5, tmp39) tl.store(out_ptr0 + x2, tmp40, xmask) @triton.jit def triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4( in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, out_ptr1, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp3 = tl.load(in_ptr1 + 4) tmp4 = tl.broadcast_to(tmp3, [XBLOCK]) tmp5 = tl.load(in_ptr2 + (16 + x0), xmask) tmp8 = tl.load(in_ptr1 + 5) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp10 = tl.load(in_ptr2 + (20 + x0), xmask) tmp13 = tl.load(in_ptr1 + 6) tmp14 = tl.broadcast_to(tmp13, [XBLOCK]) tmp15 = tl.load(in_ptr2 + (24 + x0), xmask) tmp18 = tl.load(in_ptr3 + x0, xmask) tmp20 = tl.load(in_ptr4 + x0, xmask) tmp22 = tl.load(in_ptr5 + x0, xmask) tmp59 = tl.load(in_ptr1 + 0) tmp60 = tl.broadcast_to(tmp59, [XBLOCK]) tmp61 = tl.load(in_ptr2 + x0, xmask) tmp66 = tl.load(in_ptr1 + 1) tmp67 = tl.broadcast_to(tmp66, [XBLOCK]) tmp68 = tl.load(in_ptr2 + (4 + x0), xmask) tmp73 = tl.load(in_ptr1 + 2) tmp74 = tl.broadcast_to(tmp73, [XBLOCK]) tmp75 = tl.load(in_ptr2 + (8 + x0), xmask) tmp80 = tl.load(in_ptr1 + 3) tmp81 = tl.broadcast_to(tmp80, [XBLOCK]) tmp82 = tl.load(in_ptr2 + (12 + x0), xmask) tmp1 = 1.0 tmp2 = tmp0 < tmp1 tmp6 = tmp4 + tmp5 tmp7 = tl.sigmoid(tmp6) tmp11 = tmp9 + tmp10 tmp12 = tl.sigmoid(tmp11) tmp16 = tmp14 + tmp15 tmp17 = tl.sigmoid(tmp16) tmp19 = tmp18 < tmp17 tmp21 = tmp20 < tmp12 tmp23 = tmp22 < tmp7 tmp24 = tmp23.to(tl.float32) tmp25 = tmp1 - 
tmp24 tmp26 = 0.0 tmp27 = tmp26 * tmp25 tmp28 = tmp7 * tmp24 tmp29 = tmp27 + tmp28 tmp30 = tmp21.to(tl.float32) tmp31 = tmp30 * tmp25 tmp32 = tmp1 - tmp31 tmp33 = tmp29 * tmp32 tmp34 = tmp1 - tmp7 tmp35 = tmp34 * tmp12 tmp36 = tmp35 * tmp31 tmp37 = tmp33 + tmp36 tmp38 = tmp19.to(tl.float32) tmp39 = tmp24 + tmp31 tmp40 = tmp1 - tmp39 tmp41 = tmp38 * tmp40 tmp42 = tmp1 - tmp41 tmp43 = tmp37 * tmp42 tmp44 = tmp1 - tmp12 tmp45 = tmp34 * tmp44 tmp46 = tmp45 * tmp17 tmp47 = tmp46 * tmp41 tmp48 = tmp43 + tmp47 tmp49 = tmp2.to(tl.float32) tmp50 = tmp39 + tmp41 tmp51 = tmp1 - tmp50 tmp52 = tmp49 * tmp51 tmp53 = tmp1 - tmp52 tmp54 = tmp48 * tmp53 tmp55 = tmp1 - tmp17 tmp56 = tmp45 * tmp55 tmp57 = tmp56 * tmp52 tmp58 = tmp54 + tmp57 tmp62 = tmp60 + tmp61 tmp63 = tmp62 * tmp24 tmp64 = tmp27 + tmp63 tmp65 = tmp64 * tmp32 tmp69 = tmp67 + tmp68 tmp70 = tmp69 * tmp31 tmp71 = tmp65 + tmp70 tmp72 = tmp71 * tmp42 tmp76 = tmp74 + tmp75 tmp77 = tmp76 * tmp41 tmp78 = tmp72 + tmp77 tmp79 = tmp78 * tmp53 tmp83 = tmp81 + tmp82 tmp84 = tmp83 * tmp52 tmp85 = tmp79 + tmp84 tl.store(out_ptr0 + x0, tmp2, xmask) tl.store(out_ptr1 + x0, tmp7, xmask) tl.store(out_ptr2 + x0, tmp12, xmask) tl.store(out_ptr3 + x0, tmp17, xmask) tl.store(out_ptr4 + x0, tmp19, xmask) tl.store(out_ptr5 + x0, tmp21, xmask) tl.store(out_ptr6 + x0, tmp23, xmask) tl.store(in_out_ptr0 + x0, tmp58, xmask) tl.store(in_out_ptr1 + x0, tmp85, xmask) @triton.jit def triton_poi_fused_stack_5(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp11 = 1.0 tmp12 = tmp11 - tmp10 tmp13 = tl.load(in_ptr1 + (-4 + x0), tmp9 & xmask, eviction_policy= 'evict_last', other=0.0) tmp14 = tmp12 * tmp13 tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype) tmp16 = tl.where(tmp9, tmp14, tmp15) tmp17 = tmp0 >= tmp7 tmp18 = tl.full([1], 12, tl.int64) tmp19 = tmp0 < tmp18 tmp20 = tmp17 & tmp19 tmp21 = tl.load(in_ptr0 + (-8 + x0), tmp20 & xmask, eviction_policy= 'evict_last', other=0.0) tmp22 = tmp11 - tmp21 tmp23 = tl.load(in_ptr1 + (-8 + x0), tmp20 & xmask, eviction_policy= 'evict_last', other=0.0) tmp24 = tmp11 - tmp23 tmp25 = tmp22 * tmp24 tmp26 = tl.load(in_ptr2 + (-8 + x0), tmp20 & xmask, eviction_policy= 'evict_last', other=0.0) tmp27 = tmp25 * tmp26 tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype) tmp29 = tl.where(tmp20, tmp27, tmp28) tmp30 = tmp0 >= tmp18 tl.full([1], 16, tl.int64) tmp33 = tl.load(in_ptr0 + (-12 + x0), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp34 = tmp11 - tmp33 tmp35 = tl.load(in_ptr1 + (-12 + x0), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp36 = tmp11 - tmp35 tmp37 = tmp34 * tmp36 tmp38 = tl.load(in_ptr2 + (-12 + x0), tmp30 & xmask, eviction_policy= 'evict_last', other=0.0) tmp39 = tmp11 - tmp38 tmp40 = tmp37 * tmp39 tmp41 = tl.full(tmp40.shape, 0.0, tmp40.dtype) tmp42 = tl.where(tmp30, tmp40, tmp41) tmp43 = tl.where(tmp20, tmp29, tmp42) tmp44 = tl.where(tmp9, tmp16, tmp43) tmp45 = tl.where(tmp4, tmp5, tmp44) tl.store(out_ptr0 + x0, tmp45, xmask) @triton.jit def triton_poi_fused_stack_6(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl 
.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp5 = tl.load(in_ptr0 + 0) tmp6 = tl.broadcast_to(tmp5, [XBLOCK]) tmp15 = tl.load(in_ptr0 + 1) tmp16 = tl.broadcast_to(tmp15, [XBLOCK]) tmp25 = tl.load(in_ptr0 + 2) tmp26 = tl.broadcast_to(tmp25, [XBLOCK]) tmp34 = tl.load(in_ptr0 + 3) tmp35 = tl.broadcast_to(tmp34, [XBLOCK]) tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp7 = tl.load(in_ptr1 + x0, tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp8 = tmp6 + tmp7 tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype) tmp10 = tl.where(tmp4, tmp8, tmp9) tmp11 = tmp0 >= tmp3 tmp12 = tl.full([1], 8, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp17 = tl.load(in_ptr1 + (4 + (-4 + x0)), tmp14 & xmask, eviction_policy='evict_last', other=0.0) tmp18 = tmp16 + tmp17 tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype) tmp20 = tl.where(tmp14, tmp18, tmp19) tmp21 = tmp0 >= tmp12 tmp22 = tl.full([1], 12, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp27 = tl.load(in_ptr1 + (8 + (-8 + x0)), tmp24 & xmask, eviction_policy='evict_last', other=0.0) tmp28 = tmp26 + tmp27 tmp29 = tl.full(tmp28.shape, 0.0, tmp28.dtype) tmp30 = tl.where(tmp24, tmp28, tmp29) tmp31 = tmp0 >= tmp22 tl.full([1], 16, tl.int64) tmp36 = tl.load(in_ptr1 + (12 + (-12 + x0)), tmp31 & xmask, eviction_policy='evict_last', other=0.0) tmp37 = tmp35 + tmp36 tmp38 = tl.full(tmp37.shape, 0.0, tmp37.dtype) tmp39 = tl.where(tmp31, tmp37, tmp38) tmp40 = tl.where(tmp24, tmp30, tmp39) tmp41 = tl.where(tmp14, tmp20, tmp40) tmp42 = tl.where(tmp4, tmp10, tmp41) tl.store(out_ptr0 + x0, tmp42, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12, 4), (4, 1)) assert_size_stride(primals_4, (12,), (1,)) assert_size_stride(primals_5, (12,), (1,)) assert_size_stride(primals_6, (1, 4), (4, 1)) assert_size_stride(primals_7, (1,), (1,)) assert_size_stride(primals_8, (1, 4), (4, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_new_zeros_0[grid(16)](buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) buf1 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 12), (12, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf2) buf3 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf2, buf0, primals_4, primals_5) buf4 = buf3[0] buf5 = buf3[1] del buf3 buf6 = empty_strided_cuda((7, 4), (4, 1), torch.float32) triton_poi_fused_stack_1[grid(28)](primals_8, primals_6, buf6, 28, XBLOCK=32, num_warps=1, num_stages=1) del primals_6 del primals_8 buf7 = empty_strided_cuda((7,), (1,), torch.float32) triton_poi_fused_stack_2[grid(7)](primals_9, primals_7, buf7, 7, XBLOCK=8, num_warps=1, num_stages=1) del primals_7 del primals_9 buf8 = buf2 del buf2 extern_kernels.mm(buf4, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf8) buf9 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf8, buf4, primals_4, primals_5) buf10 = buf9[0] buf11 = buf9[1] del buf9 buf12 = buf8 del buf8 
extern_kernels.mm(buf10, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf12) buf13 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf12, buf10, primals_4, primals_5) buf14 = buf13[0] buf15 = buf13[1] del buf13 buf16 = buf12 del buf12 extern_kernels.mm(buf14, reinterpret_tensor(primals_3, (4, 12), (1, 4), 0), out=buf16) buf17 = torch.ops.aten._thnn_fused_gru_cell.default(buf1, buf16, buf14, primals_4, primals_5) del buf1 del buf16 del primals_4 del primals_5 buf18 = buf17[0] buf19 = buf17[1] del buf17 buf20 = empty_strided_cuda((28, 4), (4, 1), torch.float32) triton_poi_fused_stack_3[grid(112)](buf4, buf10, buf14, buf18, buf20, 112, XBLOCK=128, num_warps=4, num_stages=1) buf21 = empty_strided_cuda((7, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf20, (7, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (7, 4, 1), (4, 1, 4), 0), out=buf21) buf23 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf24 = buf23 del buf23 buf27 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf28 = buf27 del buf27 buf31 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf32 = buf31 del buf31 buf36 = torch.ops.aten.rand.default([4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False) buf37 = buf36 del buf36 buf38 = empty_strided_cuda((4,), (1,), torch.bool) buf30 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf26 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf22 = empty_strided_cuda((4, 1), (1, 1), torch.float32) buf25 = empty_strided_cuda((4,), (1,), torch.bool) buf29 = empty_strided_cuda((4,), (1,), torch.bool) buf33 = empty_strided_cuda((4,), (1,), torch.bool) buf34 = empty_strided_cuda((4,), (1,), torch.float32) buf39 = buf34 del buf34 buf35 = empty_strided_cuda((4,), (1,), torch.float32) buf40 = buf35 del buf35 triton_poi_fused_add_bernoulli_mul_new_ones_new_zeros_rsub_sigmoid_4[ grid(4)](buf39, buf40, buf37, buf7, buf21, buf24, buf28, buf32, buf38, buf30, buf26, buf22, buf25, buf29, buf33, 4, XBLOCK=4, num_warps=1, num_stages=1) del buf24 del buf28 del buf32 del buf37 buf41 = reinterpret_tensor(buf18, (16,), (1,), 0) del buf18 triton_poi_fused_stack_5[grid(16)](buf30, buf26, buf22, buf41, 16, XBLOCK=16, num_warps=1, num_stages=1) buf42 = empty_strided_cuda((16,), (1,), torch.float32) triton_poi_fused_stack_6[grid(16)](buf7, buf21, buf42, 16, XBLOCK= 16, num_warps=1, num_stages=1) del buf21 del buf7 return (reinterpret_tensor(buf41, (4, 4), (4, 1), 0), reinterpret_tensor(buf42, (4, 4), (4, 1), 0), buf39, buf40, primals_1, buf0, buf4, buf5, buf10, buf11, buf14, buf15, buf19, buf22, buf25, buf26, buf29, buf30, buf33, buf38, reinterpret_tensor (buf6, (7, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf20, (7, 4, 4 ), (16, 1, 4), 0), primals_3) class ParityPonderGRUNew(Module): """ ## PonderNet with GRU for Parity Task This is a simple model that uses a [GRU Cell](https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html) as the step function. This model is for the [Parity Task](../parity.html) where the input is a vector of `n_elems`. Each element of the vector is either `0`, `1` or `-1` and the output is the parity - a binary value that is true if the number of `1`s is odd and false otherwise. The prediction of the model is the log probability of the parity being $1$. 
""" def __init__(self, n_elems: 'int', n_hidden: 'int', max_steps: 'int'): """ * `n_elems` is the number of elements in the input vector * `n_hidden` is the state vector size of the GRU * `max_steps` is the maximum number of steps $N$ """ super().__init__() self.max_steps = max_steps self.n_hidden = n_hidden self.gru = nn.GRUCell(n_elems, n_hidden) self.output_layer = nn.Linear(n_hidden, 1) self.lambda_layer = nn.Linear(n_hidden, 1) self.lambda_prob = nn.Sigmoid() self.is_halt = False def forward(self, input_0): primals_2 = self.gru.weight_ih primals_3 = self.gru.weight_hh primals_4 = self.gru.bias_ih primals_5 = self.gru.bias_hh primals_6 = self.output_layer.weight primals_7 = self.output_layer.bias primals_8 = self.lambda_layer.weight primals_9 = self.lambda_layer.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1], output[2], output[3]
techthiyanes/annotated_deep_learning_paper_implementations
ParityPonderGRU
false
16605
[ "MIT" ]
3714
8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
https://github.com/techthiyanes/annotated_deep_learning_paper_implementations/tree/8af24da2dd39a9a87482a4d18c2dc829bbd3fd47
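The p stacked by forward is a proper halting distribution: p_n = lambda_n * prod_{k<n}(1 - lambda_k), and forcing lambda_N = 1 at max_steps guarantees the probabilities sum to 1. A tiny numeric check of that identity, independent of the GRU:

import torch

lam = torch.tensor([0.3, 0.5, 0.2, 1.0])   # lambda_1..lambda_4; last step forced to 1
un_halted = torch.cat([torch.ones(1), torch.cumprod(1 - lam, dim=0)[:-1]])
p = un_halted * lam
print(p, float(p.sum()))                   # tensor([0.3000, 0.3500, 0.0700, 0.2800]) 1.0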
CatKLLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/mg/cmg3rjq7o6dnngxd2py7kesfu2nr7fflmrgx6htiquznvsanm322.py # Topologically Sorted Source Nodes: [qy, sub, mul, kl], Original ATen: [aten.exp, aten.sub, aten.mul, aten.sum] # Source node to ATen node mapping: # kl => sum_1 # mul => mul # qy => exp # sub => sub # Graph fragment: # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%exp, %sub), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {}) triton_poi_fused_exp_mul_sub_sum_0 = async_compile.triton('triton_poi_fused_exp_mul_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mul_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit 
def triton_poi_fused_exp_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp2 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp7 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp11 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp13 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp17 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp19 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp1 = tl_math.exp(tmp0) tmp3 = tmp0 - tmp2 tmp4 = tmp1 * tmp3 tmp6 = tl_math.exp(tmp5) tmp8 = tmp5 - tmp7 tmp9 = tmp6 * tmp8 tmp10 = tmp4 + tmp9 tmp12 = tl_math.exp(tmp11) tmp14 = tmp11 - tmp13 tmp15 = tmp12 * tmp14 tmp16 = tmp10 + tmp15 tmp18 = tl_math.exp(tmp17) tmp20 = tmp17 - tmp19 tmp21 = tmp18 * tmp20 tmp22 = tmp16 + tmp21 tl.store(out_ptr0 + (x2), tmp22, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [qy, sub, mul, kl], Original ATen: [aten.exp, aten.sub, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_exp_mul_sub_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn.modules.loss import _Loss assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_exp_mul_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tmp2 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask) tmp5 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask) tmp7 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask) tmp11 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask) tmp13 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask) tmp17 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask) tmp19 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask) tmp1 = tl_math.exp(tmp0) tmp3 = tmp0 - tmp2 tmp4 = tmp1 * tmp3 tmp6 = tl_math.exp(tmp5) tmp8 = tmp5 - tmp7 tmp9 = tmp6 * tmp8 tmp10 = tmp4 + tmp9 tmp12 = tl_math.exp(tmp11) tmp14 = tmp11 - tmp13 tmp15 = tmp12 * tmp14 tmp16 = tmp10 + tmp15 tmp18 = tl_math.exp(tmp17) tmp20 = tmp17 - tmp19 tmp21 = tmp18 * tmp20 tmp22 = tmp16 + tmp21 tl.store(out_ptr0 + x2, tmp22, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_exp_mul_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf0, class CatKLLossNew(_Loss): def __init__(self, reduction='none'): super(CatKLLossNew, self).__init__() assert reduction in ['none', 'sum', 'mean'] self.reduction = reduction def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
imguozhen/proactive-chat
CatKLLoss
false
10,289
[ "Apache-2.0" ]
0
80d13e28cb93c26a65ace0a028c53fd0bafcdbf9
https://github.com/imguozhen/proactive-chat/tree/80d13e28cb93c26a65ace0a028c53fd0bafcdbf9
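For reference, a minimal eager-mode sketch of what the fused CatKLLoss kernel above computes. The names below mirror the kernel's source-node comment ([qy, sub, mul, kl]); treating the inputs as log-probabilities is an assumption suggested by the module name, not something the generated code itself asserts.

import torch

def cat_kl_reference(log_qy: torch.Tensor, log_py: torch.Tensor) -> torch.Tensor:
    # Mirrors triton_poi_fused_exp_mul_sub_sum_0: qy = exp(log_qy), then
    # sum(qy * (log_qy - log_py)) reduced over dim 1 -- a per-position KL term.
    return (log_qy.exp() * (log_qy - log_py)).sum(dim=1)

log_qy, log_py = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
print(cat_kl_reference(log_qy, log_py).shape)  # torch.Size([4, 4, 4]), matching buf0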
Conv1d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/tp/ctphy5pjrbocsqivqoycown67e7tivt2hqux74cyakixoykbezwo.py # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd] # Source node to ATen node mapping: # input_1 => constant_pad_nd # Graph fragment: # %constant_pad_nd : [num_users=1] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_2, [1, 1], 0.0), kwargs = {}) triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[32], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = (xindex // 6) x2 = xindex tmp0 = (-1) + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = 
tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + ((-1) + x0 + (4*x1)), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + (x2), tmp6, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uj/cujexvhfquvwa2b5g6ftj7xjnepkxifmydmocky5zo4utu4xskqq.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 3) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6), (6, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.constant_pad_nd] stream0 = get_raw_stream(0) triton_poi_fused_constant_pad_nd_0.run(primals_2, buf0, 24, grid=grid(24), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 6), (0, 6, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 3), (12, 3, 1)) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: 
[aten.convolution] triton_poi_fused_convolution_1.run(buf2, primals_3, 12, grid=grid(12), stream=stream0) del primals_3 return (reinterpret_tensor(buf2, (4, 3), (3, 1), 0), primals_1, reinterpret_tensor(buf0, (1, 4, 6), (24, 6, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 24 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 6 x1 = xindex // 6 x2 = xindex tmp0 = -1 + x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tmp2 & tmp4 tmp6 = tl.load(in_ptr0 + (-1 + x0 + 4 * x1), tmp5 & xmask, other=0.0) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 12 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 3 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6), (6, 1), torch.float32) get_raw_stream(0) triton_poi_fused_constant_pad_nd_0[grid(24)](primals_2, buf0, 24, XBLOCK=32, num_warps=1, num_stages=1) del primals_2 buf1 = extern_kernels.convolution(reinterpret_tensor(buf0, (1, 4, 6 ), (0, 6, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf1, (1, 4, 3), (12, 3, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_1[grid(12)](buf2, primals_3, 12, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return reinterpret_tensor(buf2, (4, 3), (3, 1), 0 ), primals_1, reinterpret_tensor(buf0, (1, 4, 6), (24, 6, 1), 0) class Conv1dNew(nn.Conv1d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding='same', dilation=1, groups=1, bias=True): super(Conv1dNew, self).__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride= stride, padding=0, dilation=dilation, groups=groups, bias=bias, padding_mode='zeros') assert padding in ['valid', 'same', 'causal'] if padding == 'valid': self.pre_padding = None elif padding == 'same': self.pre_padding = nn.ConstantPad1d(padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2), value=0) elif padding == 'causal': self.pre_padding = nn.ConstantPad1d(padding=(kernel_size - 1, 0 ), value=0) self.noise = None self.vn_std = None def init_vn(self, vn_std): self.vn_std = vn_std def sample_synaptic_noise(self, distributed): self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size( ), device=self.weight.device, dtype=self.weight.dtype) if distributed: torch.distributed.broadcast(self.noise, 0) def forward(self, input_0): primals_1 = self.weight primals_3 = self.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
gheyret/EfficientConformer
Conv1d
false
15,429
[ "Apache-2.0" ]
101
b28a0aaa3b182f72abaccbeb12df0402adf96097
https://github.com/gheyret/EfficientConformer/tree/b28a0aaa3b182f72abaccbeb12df0402adf96097
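A minimal eager-mode sketch of the graph compiled above, assuming the 'same' branch of Conv1dNew with kernel_size=4 (consistent with the (4, 4, 4) weight and the (1, 1) padding in the pad kernel): an explicit ConstantPad1d followed by a padding-free nn.Conv1d, which is what the Triton pad kernel plus the extern convolution implement.

import torch
import torch.nn as nn

pad = nn.ConstantPad1d(padding=(1, 1), value=0)    # (kernel_size - 1) // 2 == 1
conv = nn.Conv1d(in_channels=4, out_channels=4, kernel_size=4, padding=0)
x = torch.rand(1, 4, 4)   # the compiled call views the (4, 4) input as (1, 4, L)
y = conv(pad(x))          # padded length 6 -> output length 3, matching buf1
print(y.shape)            # torch.Size([1, 4, 3])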
BertPreTrainingHeads
from _paritybench_helpers import _mock_config import math import torch from torch import nn def gelu(x): """The Gaussian Error Linear Unit activation function. ReLU has a sharp kink at 0; GELU is a smoothed version of ReLU shaped to be differentiable there. """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """A LayerNormalization layer. The variable names are changed to those of the pretrained model so that it can be loaded as-is. Renamed from the original GitHub implementation: weight→gamma, bias→beta """ super(BertLayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(hidden_size)) self.beta = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.gamma * x + self.beta class BertPredictionHeadTransform(nn.Module): """Module that transforms the features from BERT inside MaskedWordPredictions (input and output sizes are the same)""" def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.transform_act_fn = gelu self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) def forward(self, hidden_states): """hidden_states is sequence_output: [minibatch, seq_len, hidden_size]""" hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class MaskedWordPredictions(nn.Module): def __init__(self, config): """Module for the pretraining task: Masked Language Model. In the original implementation [2], it is named BertLMPredictionHead. """ super(MaskedWordPredictions, self).__init__() self.transform = BertPredictionHeadTransform(config) self.decoder = nn.Linear(in_features=config.hidden_size, out_features=config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, hidden_states): """ hidden_states: output from BERT [batch_size, seq_len, hidden_size] """ hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class BertPreTrainingHeads(nn.Module): """Adapter module that performs BERT's pretraining tasks""" def __init__(self, config, bert_model_embedding_weights): super(BertPreTrainingHeads, self).__init__() self.predictions = MaskedWordPredictions(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): """Inputs sequence_output: [batch_size, seq_len, hidden_size] pooled_output: [batch_size, hidden_size] """ prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'config': _mock_config(hidden_size=4, vocab_size=4), 'bert_model_embedding_weights': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_div_erf_mean_mul_pow_sub_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865475 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tmp10 = tmp9 * tmp1 tmp11 = tmp9 * tmp3 tmp12 = libdevice.erf(tmp11) tmp13 = tmp12 + tmp6 tmp14 = tmp10 * tmp13 tmp15 = tmp8 + tmp14 tmp17 = tmp16 * tmp1 tmp18 = tmp16 * tmp3 tmp19 = libdevice.erf(tmp18) tmp20 = tmp19 + tmp6 tmp21 = tmp17 * tmp20 tmp22 = tmp15 + tmp21 tmp24 = tmp23 * tmp1 tmp25 = tmp23 * tmp3 tmp26 = libdevice.erf(tmp25) tmp27 = tmp26 + tmp6 tmp28 = tmp24 * tmp27 tmp29 = tmp22 + tmp28 tmp30 = 4.0 tmp31 = tmp29 / tmp30 tmp32 = tmp8 - tmp31 tmp33 = tmp32 * tmp32 tmp34 = tmp14 - tmp31 tmp35 = tmp34 * tmp34 tmp36 = tmp33 + tmp35 tmp37 = tmp21 - tmp31 tmp38 = tmp37 * tmp37 tmp39 = tmp36 + tmp38 tmp40 = tmp28 - tmp31 tmp41 = tmp40 * tmp40 tmp42 = tmp39 + tmp41 tmp43 = tmp42 / tmp30 tl.store(out_ptr0 + x0, tmp31, xmask) tl.store(out_ptr1 + x0, tmp43, xmask) @triton.jit def triton_poi_fused_add_div_erf_mul_sqrt_sub_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask) tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp12 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last') tmp2 = 0.5 tmp3 = tmp1 * tmp2 tmp4 = 0.7071067811865475 tmp5 = tmp1 * tmp4 tmp6 = libdevice.erf(tmp5) tmp7 = 1.0 tmp8 = tmp6 + tmp7 tmp9 = tmp3 * tmp8 tmp11 = tmp9 - tmp10 tmp13 = 1e-12 tmp14 = tmp12 + tmp13 tmp15 = libdevice.sqrt(tmp14) tmp16 = tmp11 / tmp15 tmp17 = tmp0 * tmp16 tmp19 = tmp17 + tmp18 tl.store(out_ptr0 + x2, tmp19, xmask) @triton.jit def triton_poi_fused_add_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 
1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (2, 4), (4, 1)) assert_size_stride(primals_9, (2,), (1,)) assert_size_stride(primals_10, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_erf_mean_mul_pow_sub_0[grid(64)](buf0, buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_erf_mul_sqrt_sub_1[grid(256)](primals_4, buf0, buf1, buf2, primals_5, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del buf2 del primals_5 buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf4) buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf4 triton_poi_fused_add_2[grid(256)](buf5, primals_7, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(primals_10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 2), (1, 4), 0 ), alpha=1, beta=1, out=buf6) del primals_8 del primals_9 return buf5, reinterpret_tensor(buf6, (4, 4, 4, 2), (32, 8, 2, 1), 0 ), primals_4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_10, (64, 4), (4, 1), 0), primals_6 def gelu(x): """The Gaussian Error Linear Unit activation function. ReLU has a sharp kink at 0; GELU is a smoothed version of ReLU shaped to be differentiable there. """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-12): """A LayerNormalization layer. The variable names are changed to those of the pretrained model so that it can be loaded as-is. Renamed from the original GitHub implementation: weight→gamma, bias→beta """ super(BertLayerNorm, self).__init__() self.gamma = nn.Parameter(torch.ones(hidden_size)) self.beta = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.gamma * x + self.beta class BertPredictionHeadTransform(nn.Module): """Module that transforms the features from BERT inside MaskedWordPredictions (input and output sizes are the same)""" def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.transform_act_fn = gelu self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) def forward(self, hidden_states): """hidden_states is sequence_output: [minibatch, seq_len, hidden_size]""" hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class MaskedWordPredictions(nn.Module): def __init__(self, config): """Module for the pretraining task: Masked Language Model.
In the original implementation [2], it is named BertLMPredictionHead. """ super(MaskedWordPredictions, self).__init__() self.transform = BertPredictionHeadTransform(config) self.decoder = nn.Linear(in_features=config.hidden_size, out_features=config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) def forward(self, hidden_states): """ hidden_states: output from BERT [batch_size, seq_len, hidden_size] """ hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class BertPreTrainingHeadsNew(nn.Module): """Adapter module that performs BERT's pretraining tasks""" def __init__(self, config, bert_model_embedding_weights): super(BertPreTrainingHeadsNew, self).__init__() self.predictions = MaskedWordPredictions(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, input_0, input_1): primals_2 = self.predictions.bias primals_1 = self.predictions.transform.dense.weight primals_4 = self.predictions.transform.dense.bias primals_5 = self.predictions.transform.LayerNorm.gamma primals_7 = self.predictions.transform.LayerNorm.beta primals_6 = self.predictions.decoder.weight primals_8 = self.seq_relationship.weight primals_9 = self.seq_relationship.bias primals_3 = input_0 primals_10 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10]) return output[0], output[1]
kimihitosugiyama/text_analysis
BertPreTrainingHeads
false
3,841
[ "Apache-2.0" ]
0
8f51022957928c31e52af1e0fd407daca3addb40
https://github.com/kimihitosugiyama/text_analysis/tree/8f51022957928c31e52af1e0fd407daca3addb40
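Two constants baked into the kernels above are worth decoding: 0.7071067811865475 is 1/sqrt(2) from the erf-based GELU, and 1e-12 is the LayerNorm epsilon. A short check that the fused expression is the exact GELU from the source module:

import math
import torch

def gelu_reference(x: torch.Tensor) -> torch.Tensor:
    # Exact (erf-based) GELU, as fused into
    # triton_poi_fused_add_div_erf_mean_mul_pow_sub_0 above.
    return x * 0.5 * (1.0 + torch.erf(x * 0.7071067811865475))

x = torch.randn(16)
assert torch.allclose(gelu_reference(x), x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))))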
PairwiseDistanceMatrix
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/lw/clwf2by6lntc7xp7lanpwlhzhhgt5pqgunet732rgutttl2u2tus.py # Topologically Sorted Source Nodes: [add, mul, D], Original ATen: [aten.add, aten.mul, aten.sub] # Source node to ATen node mapping: # D => sub # add => add # mul => mul # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, %view_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 2.0), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul), kwargs = {}) triton_poi_fused_add_mul_sub_0 = async_compile.triton('triton_poi_fused_add_mul_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK 
xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp16 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tl.store(in_out_ptr0 + (x2), tmp26, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm] extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [add, mul, D], Original ATen: [aten.add, aten.mul, aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_add_mul_sub_0.run(buf1, arg0_1, arg1_1, 16, grid=grid(16), stream=stream0) del arg0_1 del arg1_1 return (buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.nn.parallel import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_sub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x0 = xindex % 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp19 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp23 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp12 = tmp11 * tmp11 tmp14 = tmp13 * tmp13 tmp15 = tmp12 + tmp14 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 + tmp21 tmp24 = 2.0 tmp25 = tmp23 * tmp24 tmp26 = tmp22 - tmp25 tl.store(in_out_ptr0 + x2, tmp26, xmask) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_add_mul_sub_0[grid(16)](buf1, arg0_1, arg1_1, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 del arg1_1 return buf1, class PairwiseDistanceMatrixNew(nn.Module): def __init__(self): super(PairwiseDistanceMatrixNew, self).__init__() def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
DreamBlack/APCNet
PairwiseDistanceMatrix
false
379
[ "MIT" ]
0
d76bc9e46c3b631035c5c67e2367b6fb80621333
https://github.com/DreamBlack/APCNet/tree/d76bc9e46c3b631035c5c67e2367b6fb80621333
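The extern mm plus the fused kernel implement the standard expansion of squared Euclidean distances. A sketch of the same identity in eager mode, checked against torch.cdist:

import torch

def pairwise_sq_dist(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2 * <x_i, y_j>; the mm supplies
    # the inner products and triton_poi_fused_add_mul_sub_0 adds the two norms.
    return (x * x).sum(1)[:, None] + (y * y).sum(1)[None, :] - 2.0 * (x @ y.t())

x, y = torch.rand(4, 4), torch.rand(4, 4)
assert torch.allclose(pairwise_sq_dist(x, y), torch.cdist(x, y).pow(2), atol=1e-5)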
FeatureEmbedder
import torch import numpy as np import torch.nn as nn from torch.utils import tensorboard as tensorboard class FeatureEmbedder(nn.Module): def __init__(self, d_feat, d_model): super(FeatureEmbedder, self).__init__() self.d_model = d_model self.embedder = nn.Linear(d_feat, d_model) self.activation = nn.ReLU() def forward(self, x): x = self.embedder(x) x = x * np.sqrt(self.d_model) x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'d_feat': 4, 'd_model': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn from torch.utils import tensorboard as tensorboard assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_relu_sqrt_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 2.0 tmp4 = tmp2 * tmp3 tmp5 = tl.full([1], 0, tl.int32) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp7 = 0.0 tmp8 = tmp6 <= tmp7 tl.store(in_out_ptr0 + x2, tmp6, xmask) tl.store(out_ptr0 + x2, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_mul_relu_sqrt_threshold_backward_0[grid(256)](buf1, primals_2, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_2 return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2 class FeatureEmbedderNew(nn.Module): def __init__(self, d_feat, d_model): super(FeatureEmbedderNew, self).__init__() self.d_model = d_model self.embedder = nn.Linear(d_feat, d_model) self.activation = nn.ReLU() def forward(self, input_0): primals_1 = self.embedder.weight primals_2 = self.embedder.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
valterlej/CustomBMT
FeatureEmbedder
false
16,852
[ "MIT" ]
157
c9326752d1355c81f845f2caab9c047be76067de
https://github.com/valterlej/CustomBMT/tree/c9326752d1355c81f845f2caab9c047be76067de
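Note how specialisation shows up in the kernel above: the literal 2.0 is sqrt(d_model) = sqrt(4) folded in at compile time. A minimal eager equivalent of the fused computation:

import numpy as np
import torch
import torch.nn as nn

d_feat = d_model = 4
embedder = nn.Linear(d_feat, d_model)
x = torch.rand(4, 4, 4, 4)
y = torch.relu(embedder(x) * np.sqrt(d_model))  # the kernel hard-codes sqrt(4) as 2.0
print(y.shape)  # torch.Size([4, 4, 4, 4])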
HuEtAl
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/x6/cx6flxcvcmjmahcfamgqxfm5bszfzqthaddwwzggkpyyncgapdg3.py # Topologically Sorted Source Nodes: [x_2, max_pool1d, x_3], Original ATen: [aten.convolution, aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] # Source node to ATen node mapping: # max_pool1d => getitem_1 # x_2 => convolution # x_3 => relu # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {}) # %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%squeeze_2,), kwargs = {}) # %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {}) triton_poi_fused_convolution_max_pool2d_with_indices_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_max_pool2d_with_indices_relu_threshold_backward_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: '*fp32', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_max_pool2d_with_indices_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 
'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_max_pool2d_with_indices_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 20 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int8) tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp2) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp3, xmask) tl.store(out_ptr1 + (x3), tmp5, xmask) tl.store(out_ptr2 + (x3), tmp7, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/yy/cyy2iyhelbruzgtpkuv4lif6qt3iicbck2w7hamal2vbsoea75do.py # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.tanh] # Source node to ATen node mapping: # x_5 => tanh # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_tanh_1 = async_compile.triton('triton_poi_fused_tanh_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[512], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def 
call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (20, 1, 1), (1, 1, 1)) assert_size_stride(primals_3, (20, ), (1, )) assert_size_stride(primals_4, (100, 80), (80, 1)) assert_size_stride(primals_5, (100, ), (1, )) assert_size_stride(primals_6, (4, 100), (100, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 4), (80, 4, 1)) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 20, 1, 4), (80, 4, 4, 1), torch.int8) buf3 = empty_strided_cuda((4, 20, 4), (80, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 20, 4), (80, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [x_2, max_pool1d, x_3], Original ATen: [aten.convolution, aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward] stream0 = get_raw_stream(0) triton_poi_fused_convolution_max_pool2d_with_indices_relu_threshold_backward_0.run(buf1, primals_3, buf2, buf3, buf7, 320, grid=grid(320), stream=stream0) del primals_3 buf4 = empty_strided_cuda((4, 100), (100, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf3, (4, 80), (80, 1), 0), reinterpret_tensor(primals_4, (80, 100), (1, 80), 0), out=buf4) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.tanh] triton_poi_fused_tanh_1.run(buf5, primals_5, 400, grid=grid(400), stream=stream0) del primals_5 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf6) del primals_7 return (buf6, primals_2, reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf1, (4, 20, 1, 4), (80, 4, 4, 1), 0), buf2, reinterpret_tensor(buf3, (4, 80), (80, 1), 0), buf5, primals_6, primals_4, buf7, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((20, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((100, 80), (80, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((100, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 100), (100, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import math import torch.utils import torch.utils.data import torch.nn as nn from torch.nn import init assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_max_pool2d_with_indices_relu_threshold_backward_0( in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl. constexpr): xnumel = 320 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 20 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int8) tmp4 = tl.full([1], 0, tl.int32) tmp5 = triton_helpers.maximum(tmp4, tmp2) tmp6 = 0.0 tmp7 = tmp5 <= tmp6 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp3, xmask) tl.store(out_ptr1 + x3, tmp5, xmask) tl.store(out_ptr2 + x3, tmp7, xmask) @triton.jit def triton_poi_fused_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 400 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 100 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (20, 1, 1), (1, 1, 1)) assert_size_stride(primals_3, (20,), (1,)) assert_size_stride(primals_4, (100, 80), (80, 1)) assert_size_stride(primals_5, (100,), (1,)) assert_size_stride(primals_6, (4, 100), (100, 1)) assert_size_stride(primals_7, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 20, 4), (80, 4, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 20, 1, 4), (80, 4, 4, 1), torch.int8) buf3 = empty_strided_cuda((4, 20, 4), (80, 4, 1), torch.float32) buf7 = empty_strided_cuda((4, 20, 4), (80, 4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_max_pool2d_with_indices_relu_threshold_backward_0[ grid(320)](buf1, primals_3, buf2, buf3, buf7, 320, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf4 = empty_strided_cuda((4, 100), (100, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (4, 80), (80, 1), 0), reinterpret_tensor(primals_4, (80, 100), (1, 80), 0), out=buf4) buf5 = buf4 del buf4 triton_poi_fused_tanh_1[grid(400)](buf5, primals_5, 400, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (100, 4), (1, 100), 0), alpha=1, beta=1, out=buf6) del primals_7 return buf6, primals_2, 
reinterpret_tensor(primals_1, (4, 1, 4), (4, 4, 1), 0), reinterpret_tensor(buf1, (4, 20, 1, 4), (80, 4, 4, 1), 0 ), buf2, reinterpret_tensor(buf3, (4, 80), (80, 1), 0 ), buf5, primals_6, primals_4, buf7 class HuEtAlNew(nn.Module): """ Deep Convolutional Neural Networks for Hyperspectral Image Classification Wei Hu, Yangyu Huang, Li Wei, Fan Zhang and Hengchao Li Journal of Sensors, Volume 2015 (2015) https://www.hindawi.com/journals/js/2015/258619/ """ @staticmethod def weight_init(m): if isinstance(m, nn.Linear) or isinstance(m, nn.Conv1d): init.uniform_(m.weight, -0.05, 0.05) init.zeros_(m.bias) def _get_final_flattened_size(self): with torch.no_grad(): x = torch.zeros(1, 1, self.input_channels) x = self.pool(self.conv(x)) return x.numel() def __init__(self, input_channels, n_classes, kernel_size=None, pool_size=None): super(HuEtAlNew, self).__init__() if kernel_size is None: kernel_size = math.ceil(input_channels / 9) if pool_size is None: pool_size = math.ceil(kernel_size / 5) self.input_channels = input_channels self.conv = nn.Conv1d(1, 20, kernel_size) self.pool = nn.MaxPool1d(pool_size) self.features_size = self._get_final_flattened_size() self.fc1 = nn.Linear(self.features_size, 100) self.fc2 = nn.Linear(100, n_classes) self.apply(self.weight_init) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_4 = self.fc1.weight primals_5 = self.fc1.bias primals_6 = self.fc2.weight primals_7 = self.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
giorgosouz/HSI-classification-using-state-of-the-art-models
HuEtAl
false
12,426
[ "MIT" ]
0
a925972ffe02c2cd1e5dde2b163e1faa854a4966
https://github.com/giorgosouz/HSI-classification-using-state-of-the-art-models/tree/a925972ffe02c2cd1e5dde2b163e1faa854a4966
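The features_size that sizes fc1 above comes from a dry run: _get_final_flattened_size pushes a zero tensor through conv and pool once and counts the surviving elements. A sketch of that probe with the defaults for input_channels=4:

import math
import torch
import torch.nn as nn

input_channels = 4
kernel_size = math.ceil(input_channels / 9)  # -> 1, matching the (20, 1, 1) weight
pool_size = math.ceil(kernel_size / 5)       # -> 1
conv = nn.Conv1d(1, 20, kernel_size)
pool = nn.MaxPool1d(pool_size)
with torch.no_grad():
    features_size = pool(conv(torch.zeros(1, 1, input_channels))).numel()
print(features_size)  # 80, matching the (100, 80) fc1 weight in the compiled graph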
Net
import torch import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self, input_size, out_size, drop_prob=0.5): super(Net, self).__init__() self.fc1 = nn.Linear(input_size, 256) self.fc2 = nn.Linear(256, out_size) self.drop_prob = drop_prob def forward(self, x): x = F.relu(self.fc1(x)) x = F.dropout(x, self.drop_prob, self.training) x = F.relu(self.fc2(x)) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_size': 4, 'out_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, None) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (256, 4), (4, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 256), (256, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 256), (256, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 256), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 256), (4096, 1024, 256, 1), 0 ) del buf0 buf5 = empty_strided_cuda((4, 4, 4, 256), (4096, 1024, 256, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16384)](buf1, primals_2, buf5, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf1, (64, 256), (256, 1), 0), reinterpret_tensor(primals_4, (256, 4), (1, 256), 0), out=buf2) buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf2 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool) triton_poi_fused_relu_threshold_backward_1[grid(256)](buf3, primals_5, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_5 return buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf1, (64, 256), (256, 1), 0 ), buf4, primals_4, buf5 class NetNew(nn.Module): def __init__(self, input_size, out_size, drop_prob=0.5): super(NetNew, self).__init__() self.fc1 = nn.Linear(input_size, 256) self.fc2 = nn.Linear(256, out_size) self.drop_prob = drop_prob def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = 
call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
AlexMoreo/inntt
Net
false
18,410
[ "MIT" ]
2
6f48a37ad5b451f1fef0d2ca1c4c46dd5abc6689
https://github.com/AlexMoreo/inntt/tree/6f48a37ad5b451f1fef0d2ca1c4c46dd5abc6689
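One detail of the compiled graph above: it contains no dropout kernel and no RNG op, which suggests the module was traced with self.training == False, where F.dropout reduces to the identity; only the two matmuls and the fused bias+ReLU kernels remain. A one-line check of that identity:

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
assert torch.equal(F.dropout(x, 0.5, training=False), x)  # no-op when not training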
ReshapeF
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/ef/cef2l3ajjzplmrqfzg25ehcs22rwnbbsdt4r6sobviedge5satl2.py # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, add, out], Original ATen: [aten.pow, aten.sum, aten.add, aten.div] # Source node to ATen node mapping: # add => add # norm => pow_2 # out => div # pow_1 => pow_1 # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 1e-07), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view, %add), kwargs = {}) triton_poi_fused_add_div_pow_sum_0 = async_compile.triton('triton_poi_fused_add_div_pow_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 
'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_pow_sum_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + ((16*x1) + (64*(y0 // 16)) + (y0 % 16)), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + ((64*(y0 // 16)) + (y0 % 16)), ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + (64*(y0 // 16)) + (y0 % 16)), ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + (64*(y0 // 16)) + (y0 % 16)), ymask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + (64*(y0 // 16)) + (y0 % 16)), ymask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x1 + (4*y0)), tmp15, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, add, out], Original ATen: [aten.pow, aten.sum, aten.add, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_add_div_pow_sum_0.run(arg0_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.utils.data import torch import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_pow_sum_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (16 * x1 + 64 * (y0 // 16) + y0 % 16), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (64 * (y0 // 16) + y0 % 16), ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + 64 * (y0 // 16) + y0 % 16), ymask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + 64 * (y0 // 16) + y0 % 16), ymask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + 64 * (y0 // 16) + y0 % 16), ymask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-07 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x1 + 4 * y0), tmp15, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_pow_sum_0[grid(64, 4)](arg0_1, buf0, 64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1) del arg0_1 return buf0, class Normalize(nn.Module): def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.0 / self.power) out = x.div(norm + 1e-07) return out class ReshapeFNew(nn.Module): def __init__(self): super(ReshapeFNew, self).__init__() model = [nn.AdaptiveAvgPool2d(4)] self.model = nn.Sequential(*model) self.l2norm = Normalize(2) def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
bronemos/contrastive-unpaired-translation-focal
ReshapeF
false
3,248
[ "BSD-3-Clause" ]
0
50b9008d08a86439ede081a910d02df5da8e32df
https://github.com/bronemos/contrastive-unpaired-translation-focal/tree/50b9008d08a86439ede081a910d02df5da8e32df
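A quick way to see what the ReshapeF pair above computes: reading the kernel's index arithmetic, each (batch, spatial) position is L2-normalized over its 4 channels after the tensor is flattened to (64, 4); the AdaptiveAvgPool2d(4) is a no-op for the 4x4 spatial test input, so the compiled graph reduces to a permute plus a row-wise normalization. The sanity check below is my own sketch, not part of the source record; it assumes a CUDA device and refers to the call defined in the optimised code above.

import torch

# Illustrative sanity check of the fused normalization kernel (assumes CUDA).
x = torch.rand(4, 4, 4, 4, device='cuda')
# Reference: channels last, flatten to (batch*H*W, C), then row-wise L2 normalize
# with the same 1e-07 epsilon the kernel adds to the norm.
ref = x.permute(0, 2, 3, 1).reshape(64, 4)
ref = ref / (ref.pow(2).sum(1, keepdim=True).sqrt() + 1e-07)
out, = call([x])          # call() consumes (clears) its argument list
print(torch.allclose(out, ref, atol=1e-06))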
LossesOfConVIRT
import torch import torch.nn as nn class LossesOfConVIRT(nn.Module): """ """ def __init__(self, tau=0.1, lambd=0.75): super(LossesOfConVIRT, self).__init__() self.tau = tau self.lambd = lambd def tmp_loss(self, v, u, index): """ """ assert v.size(0) == u.size(0) item1 = torch.exp(torch.divide(torch.cosine_similarity(v[index], u[ index], dim=0), self.tau)) item2 = torch.exp(torch.divide(torch.cosine_similarity(v[index]. unsqueeze(0), u, dim=1), self.tau)).sum() loss = -torch.log(torch.divide(item1, item2)) return loss def image_text(self, v, u, index): """ """ assert v.size(0) == u.size(0) cos = torch.nn.CosineSimilarity(dim=0) item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau)) cos2 = torch.nn.CosineSimilarity(dim=1) item2 = torch.exp(torch.divide(cos2(v[index].unsqueeze(0), u), self .tau)).sum() loss = -torch.log(torch.divide(item1, item2)) return loss def text_image(self, v, u, index): """ """ assert v.size(0) == u.size(0) cos = torch.nn.CosineSimilarity(dim=0) item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau)) cos2 = torch.nn.CosineSimilarity(dim=1) item2 = torch.exp(torch.divide(cos2(v, u[index].unsqueeze(0)), self .tau)).sum() loss = -torch.log(torch.divide(item1, item2)).item() return loss def forward(self, v, u): """ :return: """ assert v.size(0) == u.size(0) res = 0.0 v = v.float() u = u.float() for i in range(v.size(0)): res += self.lambd * self.image_text(v, u, i) + (1 - self.lambd ) * self.text_image(v, u, i) res /= v.size(0) return res def get_inputs(): return [torch.rand([4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp4 = tl.load(in_ptr0 + 1) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp8 = tl.load(in_ptr0 + 2) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp12 = tl.load(in_ptr0 + 3) tmp13 = tl.broadcast_to(tmp12, [XBLOCK]) tmp20 = tl.load(in_ptr1 + x2, xmask) tmp21 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp26 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp29 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp3 = tmp2 * tmp2 tmp6 = tmp5 * tmp5 tmp7 = tmp3 + tmp6 tmp10 = tmp9 * tmp9 tmp11 = tmp7 + tmp10 tmp14 = tmp13 * tmp13 tmp15 = tmp11 + tmp14 tmp16 = libdevice.sqrt(tmp15) tmp17 = 1e-08 tmp18 = triton_helpers.maximum(tmp16, tmp17) tmp19 = tmp0 / tmp18 tmp22 = tmp21 * tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp30 = tmp29 * tmp29 tmp31 = tmp28 + tmp30 tmp32 = libdevice.sqrt(tmp31) tmp33 = triton_helpers.maximum(tmp32, tmp17) tmp34 = tmp20 / tmp33 tmp35 = tmp19 * tmp34 tl.store(out_ptr0 + x2, tmp35, xmask) @triton.jit def triton_per_fused_clamp_min_div_exp_linalg_vector_norm_log_mul_neg_sum_1( in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl. 
constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp21 = tl.load(in_ptr2 + 4 * r0, None, eviction_policy='evict_last') tmp22 = tl.load(in_ptr2 + (1 + 4 * r0), None, eviction_policy='evict_last') tmp24 = tl.load(in_ptr2 + (2 + 4 * r0), None, eviction_policy='evict_last') tmp26 = tl.load(in_ptr2 + (3 + 4 * r0), None, eviction_policy='evict_last') tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp6 = tmp5 * tmp5 tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.sum(tmp7, 1)[:, None] tmp10 = libdevice.sqrt(tmp4) tmp11 = 1e-08 tmp12 = triton_helpers.maximum(tmp10, tmp11) tmp13 = tmp0 / tmp12 tmp14 = libdevice.sqrt(tmp9) tmp15 = triton_helpers.maximum(tmp14, tmp11) tmp16 = tmp5 / tmp15 tmp17 = tmp13 * tmp16 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp23 = tmp21 + tmp22 tmp25 = tmp23 + tmp24 tmp27 = tmp25 + tmp26 tmp28 = 10.0 tmp29 = tmp27 * tmp28 tmp30 = tl_math.exp(tmp29) tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.sum(tmp31, 1)[:, None] tmp34 = tmp20 * tmp28 tmp35 = tl_math.exp(tmp34) tmp36 = tmp35 / tmp33 tmp37 = tl_math.log(tmp36) tmp38 = -tmp37 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp38, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4), (4, 1)) assert_size_stride(arg1_1, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_min_div_linalg_vector_norm_mul_0[grid(16)]( arg0_1, arg1_1, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf0 = empty_strided_cuda((1,), (1,), torch.float32) buf2 = reinterpret_tensor(buf0, (), (), 0) del buf0 buf5 = buf2 del buf2 triton_per_fused_clamp_min_div_exp_linalg_vector_norm_log_mul_neg_sum_1[ grid(1)](buf5, arg0_1, arg1_1, buf3, 1, 4, XBLOCK=1, num_warps= 2, num_stages=1) del arg0_1 del arg1_1 del buf3 return buf5, class LossesOfConVIRTNew(nn.Module): """ """ def __init__(self, tau=0.1, lambd=0.75): super(LossesOfConVIRTNew, self).__init__() self.tau = tau self.lambd = lambd def tmp_loss(self, v, u, index): """ """ assert v.size(0) == u.size(0) item1 = torch.exp(torch.divide(torch.cosine_similarity(v[index], u[ index], dim=0), self.tau)) item2 = torch.exp(torch.divide(torch.cosine_similarity(v[index]. 
unsqueeze(0), u, dim=1), self.tau)).sum() loss = -torch.log(torch.divide(item1, item2)) return loss def image_text(self, v, u, index): """ """ assert v.size(0) == u.size(0) cos = torch.nn.CosineSimilarity(dim=0) item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau)) cos2 = torch.nn.CosineSimilarity(dim=1) item2 = torch.exp(torch.divide(cos2(v[index].unsqueeze(0), u), self .tau)).sum() loss = -torch.log(torch.divide(item1, item2)) return loss def text_image(self, v, u, index): """ """ assert v.size(0) == u.size(0) cos = torch.nn.CosineSimilarity(dim=0) item1 = torch.exp(torch.divide(cos(v[index], u[index]), self.tau)) cos2 = torch.nn.CosineSimilarity(dim=1) item2 = torch.exp(torch.divide(cos2(v, u[index].unsqueeze(0)), self .tau)).sum() loss = -torch.log(torch.divide(item1, item2)).item() return loss def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
funnyzhou/REFERS
LossesOfConVIRT
false
15,374
[ "MIT" ]
46
392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
https://github.com/funnyzhou/REFERS/tree/392eddf13cbf3c3a7dc0bf8bfffd108ca4a65a19
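The eager LossesOfConVIRT above evaluates both loss directions with per-index Python loops, so a batch of size N costs 2N separate cosine-similarity/softmax passes. The sketch below is my own vectorized restatement (not from the source repo), using the standard in-batch InfoNCE identity: each image_text(v, u, i) term is row i of a cross-entropy over the similarity matrix, and each text_image term is the corresponding column. Note that the original text_image calls .item(), which detaches that term from autograd; this version keeps it differentiable, so gradients differ even where the value agrees.

import torch
import torch.nn.functional as F

def convirt_loss_vectorized(v, u, tau=0.1, lambd=0.75):
    # Vectorized restatement (sketch) of the per-index loops above.
    v = F.normalize(v.float(), dim=1)      # unit vectors, so v @ u.t() is cosine similarity
    u = F.normalize(u.float(), dim=1)
    logits = v @ u.t() / tau               # logits[i, j] = cos(v_i, u_j) / tau
    targets = torch.arange(v.size(0), device=v.device)
    loss_v2u = F.cross_entropy(logits, targets)      # image -> text direction
    loss_u2v = F.cross_entropy(logits.t(), targets)  # text -> image direction
    # cross_entropy already averages over the batch, matching res /= v.size(0).
    return lambd * loss_v2u + (1 - lambd) * loss_u2v

print(convirt_loss_vectorized(torch.rand(4, 4), torch.rand(4, 4)))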
LocalDiscriminator
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim class LocalDiscriminator(nn.Module): """The local discriminator class. A network that analyses the relation between the output of the encoder y and the feature map M. It is called "local" because it compares y with each one of the features in M. So if M is a [64, 6, 6] feature map, and y is a [32] vector, the comparison is done by concatenating y with each one of the 6x6 features in M: (i) [32] -> [64, 1, 1]; (ii) [32] -> [64, 1, 2] ... (xxxvi) [32] -> [64, 6, 6]. This can be done efficiently by expanding y to have the same spatial dimensionality as M such that: [32] torch.expand -> [32, 6, 6] and then concatenating on the channel dimension: [32, 6, 6] torch.cat(axis=0) with M [64, 6, 6] -> [96, 6, 6]. The tensor is then fed to the local discriminator. """ def __init__(self, y_size, M_channels): super().__init__() self.c0 = nn.Conv2d(y_size + M_channels, 256, kernel_size=1) self.c1 = nn.Conv2d(256, 256, kernel_size=1) self.c2 = nn.Conv2d(256, 1, kernel_size=1) def forward(self, x): h = F.relu(self.c0(x)) h = F.relu(self.c1(h)) return self.c2(h) def get_inputs(): return [torch.rand([4, 8, 64, 64])] def get_init_inputs(): return [[], {'y_size': 4, 'M_channels': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 32 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 8 y1 = yindex // 8 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 8 * x2 + 32768 * y1), tmp0, ymask) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 256 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, None) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tl.store(in_out_ptr0 + x0, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (256, 8, 1, 1), (8, 1, 1, 1)) assert_size_stride(primals_2, (256,), (1,)) assert_size_stride(primals_3, (4, 8, 64, 64), (32768, 4096, 64, 1)) assert_size_stride(primals_4, (256, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_5, (256,), (1,)) assert_size_stride(primals_6, (1, 256, 1, 1), (256, 1, 1, 1)) assert_size_stride(primals_7, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 8, 64, 64), (32768, 1, 512, 8), torch .float32) get_raw_stream(0) triton_poi_fused_0[grid(32, 4096)](primals_3, buf0, 32, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(4194304)](buf2, primals_2, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 256, 64, 64), (1048576, 1, 16384, 256)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_1[grid(4194304)](buf4, primals_5, 4194304, XBLOCK=1024, num_warps=4, num_stages=1) del primals_5 buf5 = extern_kernels.convolution(buf4, primals_6, 
stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf5, (4, 1, 64, 64), (4096, 1, 64, 1)) buf6 = reinterpret_tensor(buf5, (4, 1, 64, 64), (4096, 4096, 64, 1), 0) del buf5 triton_poi_fused_convolution_2[grid(16384)](buf6, primals_7, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_7 return buf6, primals_1, buf0, primals_4, primals_6, buf2, buf4 class LocalDiscriminatorNew(nn.Module): """The local discriminator class. A network that analyses the relation between the output of the encoder y and the feature map M. It is called "local" because it compares y with each one of the features in M. So if M is a [64, 6, 6] feature map, and y is a [32] vector, the comparison is done by concatenating y with each one of the 6x6 features in M: (i) [32] -> [64, 1, 1]; (ii) [32] -> [64, 1, 2] ... (xxxvi) [32] -> [64, 6, 6]. This can be done efficiently by expanding y to have the same spatial dimensionality as M such that: [32] torch.expand -> [32, 6, 6] and then concatenating on the channel dimension: [32, 6, 6] torch.cat(axis=0) with M [64, 6, 6] -> [96, 6, 6]. The tensor is then fed to the local discriminator. """ def __init__(self, y_size, M_channels): super().__init__() self.c0 = nn.Conv2d(y_size + M_channels, 256, kernel_size=1) self.c1 = nn.Conv2d(256, 256, kernel_size=1) self.c2 = nn.Conv2d(256, 1, kernel_size=1) def forward(self, input_0): primals_1 = self.c0.weight primals_2 = self.c0.bias primals_4 = self.c1.weight primals_5 = self.c1.bias primals_6 = self.c2.weight primals_7 = self.c2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
ValerioB88/self-supervised-relational-reasoning
LocalDiscriminator
false
9,680
[ "MIT" ]
0
12692b93d5c8dd3f56a31aa8b790366556e7a621
https://github.com/ValerioB88/self-supervised-relational-reasoning/tree/12692b93d5c8dd3f56a31aa8b790366556e7a621
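The docstring's expand-and-concat trick fits in a few lines. Below is an illustration with the shapes the docstring uses, run through the eager LocalDiscriminator; the compiled LocalDiscriminatorNew would not accept it, since its call() is shape-specialised to (4, 8, 64, 64) inputs by assert_size_stride guards.

import torch

# Illustrative construction of the discriminator input described in the docstring.
B, y_size, M_channels, H, W = 4, 32, 64, 6, 6
y = torch.rand(B, y_size)                             # global code from the encoder
M = torch.rand(B, M_channels, H, W)                   # local feature map
y_map = y[:, :, None, None].expand(B, y_size, H, W)   # broadcast y over the 6x6 grid
pair = torch.cat([y_map, M], dim=1)                   # [4, 96, 6, 6]
disc = LocalDiscriminator(y_size=y_size, M_channels=M_channels)
print(disc(pair).shape)   # torch.Size([4, 1, 6, 6]): one score per spatial location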
BboxHead
import torch import torch.nn as nn from itertools import product as product class BboxHead(nn.Module): def __init__(self, inchannels=512, num_anchors=2): super(BboxHead, self).__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=( 1, 1), stride=1, padding=0) def forward(self, x): out = self.conv1x1(x) out = out.permute(0, 2, 3, 1).contiguous() return out.view(out.shape[0], -1, 4) def get_inputs(): return [torch.rand([4, 512, 64, 64])] def get_init_inputs(): return [[], {}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from itertools import product as product assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] tl.full([XBLOCK, YBLOCK], True, tl.int1) xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, YBLOCK], True, tl.int1) x2 = xindex y3 = yindex y0 = yindex % 512 y1 = yindex // 512 tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy= 'evict_last') tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None) @triton.jit def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x4 = xindex x0 = xindex % 8 tmp0 = tl.load(in_out_ptr0 + x4, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x4, tmp2, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (8, 512, 1, 1), (512, 1, 1, 1)) assert_size_stride(primals_2, (8,), (1,)) assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512 ), torch.float32) get_raw_stream(0) triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 8, 64, 64), (32768, 1, 512, 8)) buf2 = reinterpret_tensor(buf1, (4, 64, 64, 8), (32768, 512, 8, 1), 0) del buf1 buf3 = reinterpret_tensor(buf2, (4, 8192, 4), (32768, 4, 1), 0) del buf2 triton_poi_fused_clone_view_1[grid(131072)](buf3, primals_2, 131072, XBLOCK=1024, num_warps=4, num_stages=1) del primals_2 return buf3, primals_1, buf0 class BboxHeadNew(nn.Module): def __init__(self, inchannels=512, num_anchors=2): super(BboxHeadNew, self).__init__() self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=( 1, 1), stride=1, padding=0) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
huigs/retinaface-pytorch
BboxHead
false
10,241
[ "MIT" ]
0
0d7551d5863d172c2122bdd8d2d58be36e1b10fd
https://github.com/huigs/retinaface-pytorch/tree/0d7551d5863d172c2122bdd8d2d58be36e1b10fd
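For BboxHead, the only nontrivial step is the layout change after the 1x1 convolution: the conv emits num_anchors * 4 channels per cell, and the permute + view regroups them into one 4-vector of box coordinates per (cell, anchor). A short shape walk-through with the eager module (my own example; the compiled call() above is specialised to batch-4, 512-channel, 64x64 inputs):

import torch

head = BboxHead(inchannels=512, num_anchors=2)
x = torch.rand(1, 512, 64, 64)
out = head(x)      # conv -> (1, 8, 64, 64); permute -> (1, 64, 64, 8); view -> rows of 4
print(out.shape)   # torch.Size([1, 8192, 4]): 64*64 cells x 2 anchors, 4 coords each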
ConvReLUNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/cu/ccutvo2v4333pq6xhrg2zryqqwthm7dmmuqprvva2xdwiodpz5jn.py # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv1d => convolution # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/gy/cgylhqrp7uo6ulwwvwcaavkdj6lb3xbmk43eobqwwqruey33hhwc.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, clone, rsqrt, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask) tmp9 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp2 + tmp4 tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tmp5 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp4 - tmp13 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp7 - tmp13 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 - tmp13 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp24 / tmp12 tmp26 = 1e-05 tmp27 = tmp25 + tmp26 tmp28 = libdevice.rsqrt(tmp27) 
tl.store(out_ptr0 + (x2), tmp13, xmask) tl.store(out_ptr1 + (x2), tmp28, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/pa/cpaccjq76xvjux47qgs7wy4lqiq65radyhdwdqtrvh44sdzkani6.py # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # layer_norm => add, add_1, clone, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%clone, [2]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_4), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_5), kwargs = {}) triton_poi_fused_native_layer_norm_2 = async_compile.triton('triton_poi_fused_native_layer_norm_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 4], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = (yindex // 4) y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, 
eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (y3), ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + (y3), ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 * tmp7 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + (x2 + (4*y3)), tmp10, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, ), (1, )) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 64, grid=grid(64), stream=stream0) del primals_2 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(buf1, buf2, buf3, 16, grid=grid(16), stream=stream0) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_2.run(buf1, buf2, buf3, primals_4, primals_5, buf4, 16, 4, grid=grid(16, 4), stream=stream0) del buf2 del buf3 del primals_5 return (reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0), primals_1, primals_3, primals_4, buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice from torch import nn import torch.utils.data import torch.optim assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask) tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask) tmp6 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask) tmp9 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask) tmp1 = tl.full([1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp1, tmp3) tmp5 = tmp2 + tmp4 tmp7 = triton_helpers.maximum(tmp1, tmp6) tmp8 = tmp5 + tmp7 tmp10 = triton_helpers.maximum(tmp1, tmp9) tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp14 tmp16 = tmp4 - tmp13 tmp17 = tmp16 * tmp16 tmp18 = tmp15 + tmp17 tmp19 = tmp7 - tmp13 tmp20 = tmp19 * tmp19 tmp21 = tmp18 + tmp20 tmp22 = tmp10 - tmp13 tmp23 = tmp22 * tmp22 tmp24 = tmp21 + tmp23 tmp25 = tmp24 / tmp12 tmp26 = 1e-05 tmp27 = tmp25 + tmp26 tmp28 = libdevice.rsqrt(tmp27) tl.store(out_ptr0 + x2, tmp13, xmask) tl.store(out_ptr1 + x2, tmp28, xmask) @triton.jit def triton_poi_fused_native_layer_norm_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl. 
constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + y3, ymask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr2 + y3, ymask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last') tmp1 = tl.full([1, 1], 0, tl.int32) tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = tmp2 - tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 * tmp7 tmp10 = tmp8 + tmp9 tl.store(out_ptr0 + (x2 + 4 * y3), tmp10, xmask & ymask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(64)](buf1, primals_2, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_2 buf2 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused_native_layer_norm_1[grid(16)](buf1, buf2, buf3, 16, XBLOCK=16, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_native_layer_norm_2[grid(16, 4)](buf1, buf2, buf3, primals_4, primals_5, buf4, 16, 4, XBLOCK=4, YBLOCK=8, num_warps=1, num_stages=1) del buf2 del buf3 del primals_5 return reinterpret_tensor(buf4, (4, 4, 4), (16, 1, 4), 0 ), primals_1, primals_3, primals_4, buf1 class ConvReLUNormNew(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0): super(ConvReLUNormNew, self).__init__() self.conv = nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, padding=kernel_size // 2) self.norm = nn.LayerNorm(out_channels) self.dropout = nn.Dropout(dropout) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_4 = self.norm.weight primals_5 = self.norm.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
PiotrDabkowski/NeMo
ConvReLUNorm
false
11,785
[ "Apache-2.0" ]
0
7c251e9035b24136cf130f3caf760087e5ccf07c
https://github.com/PiotrDabkowski/NeMo/tree/7c251e9035b24136cf130f3caf760087e5ccf07c
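The eager forward of ConvReLUNorm is not reproduced in this record. Judging from the kernels (bias add fused into the convolution output, ReLU folded into the layer-norm statistics, and the transposed strides of the returned tensor), it plausibly reads as the reconstruction below; treat this as a sketch inferred from the compiled graph, not the repo's code.

import torch
from torch import nn

class ConvReLUNormRef(nn.Module):
    # Reconstruction (illustrative) of the module the compiled graph was traced from.
    def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
        super().__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
                              kernel_size=kernel_size, padding=kernel_size // 2)
        self.norm = nn.LayerNorm(out_channels)
        self.dropout = nn.Dropout(dropout)

    def forward(self, signal):                # signal: (batch, channels, time)
        out = torch.relu(self.conv(signal))
        # LayerNorm normalises the last dim, so move channels last and back.
        out = self.norm(out.transpose(1, 2)).transpose(1, 2)
        return self.dropout(out)

print(ConvReLUNormRef(4, 4)(torch.rand(4, 4, 4)).shape)  # torch.Size([4, 4, 4])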
MixerBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/lj/clj2tvu4xbd7d3fywia62geslao7fdlxxnfmtyxlxmvzwpd335xz.py # Topologically Sorted Source Nodes: [y, y_2], Original ATen: [aten.native_layer_norm, aten.clone] # Source node to ATen node mapping: # y => var_mean # y_2 => clone # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) triton_per_fused_clone_native_layer_norm_0 = async_compile.triton('triton_per_fused_clone_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clone_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_clone_native_layer_norm_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 
rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r2 = rindex % 4 r3 = (rindex // 4) % 4 r4 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0) tmp17 = tl.load(in_ptr0 + (r2 + (4*r4) + (16*r3) + (64*x0)), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp18 = tmp17 - tmp10 tmp19 = 64.0 tmp20 = tmp16 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp24 = tmp18 * tmp23 tl.store(out_ptr2 + (r1 + (64*x0)), tmp24, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/2o/c2oeyuwv7gxwqncpovtxrktvflvbh65akpqi3mvsjgsq3we4q3a7.py # Topologically Sorted Source Nodes: [y_2, y_3], Original ATen: [aten.add, aten.gelu] # Source node to ATen node mapping: # y_2 => add_1 # y_3 => add_2, erf, mul_1, mul_2, mul_3 # Graph fragment: # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.5), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_2,), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %add_2), kwargs = {}) triton_poi_fused_add_gelu_1 = async_compile.triton('triton_poi_fused_add_gelu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_gelu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': 
False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_gelu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 0.7071067811865476 tmp6 = tmp2 * tmp5 tmp7 = libdevice.erf(tmp6) tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tl.store(out_ptr0 + (x2), tmp10, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/uy/cuyw6lvlmoibpqhmwpt6rbmwipmvm3c4fzsabl22jlquk4oeafts.py # Topologically Sorted Source Nodes: [x, y_6], Original ATen: [aten.add, aten.native_layer_norm, aten.native_layer_norm_backward] # Source node to ATen node mapping: # x => add_3 # y_6 => add_4, mul_4, rsqrt_1, sub_1, var_mean_1 # Graph fragment: # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %permute_3), kwargs = {}) # %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add_3, [1, 2, 3]), kwargs = {correction: 0, keepdim: True}) # %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {}) # %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %getitem_3), kwargs = {}) # %mul_4 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt_1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%rsqrt_1, 64), kwargs = {}) triton_per_fused_add_native_layer_norm_native_layer_norm_backward_2 = async_compile.triton('triton_per_fused_add_native_layer_norm_native_layer_norm_backward_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[4, 64], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_native_layer_norm_native_layer_norm_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def 
triton_per_fused_add_native_layer_norm_native_layer_norm_backward_2(in_ptr0, in_ptr1, in_ptr2, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 4 rnumel = 64 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r4 = rindex x0 = xindex r1 = rindex % 4 r2 = (rindex // 4) % 4 r3 = (rindex // 16) tmp0 = tl.load(in_ptr0 + (r4 + (64*x0)), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + (4*r3) + (16*r2) + (64*x0)), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + (r1), None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tmp7 = tl.where(xmask, tmp5, 0) tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.full([XBLOCK, 1], 64, tl.int32) tmp13 = tmp12.to(tl.float32) tmp14 = tmp11 / tmp13 tmp15 = tmp5 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp4 - tmp14 tmp22 = 64.0 tmp23 = tmp20 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.rsqrt(tmp25) tmp27 = tmp21 * tmp26 tmp28 = 0.015625 tmp29 = tmp26 * tmp28 tl.store(out_ptr2 + (r4 + (64*x0)), tmp27, xmask) tl.store(out_ptr3 + (x0), tmp29, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/eq/ceqyrd3nyfgtw7jkadsbftrdzpqx3nyywoow4audc4qo3mi7uszw.py # Topologically Sorted Source Nodes: [y_8], Original ATen: [aten.gelu] # Source node to ATen node mapping: # y_8 => add_5, erf_1, mul_5, mul_6, mul_7 # Graph fragment: # %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 0.5), kwargs = {}) # %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 0.7071067811865476), kwargs = {}) # %erf_1 : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_6,), kwargs = {}) # %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf_1, 1), kwargs = {}) # %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %add_5), kwargs = {}) triton_poi_fused_gelu_3 = async_compile.triton('triton_poi_fused_gelu_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': 
None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/o7/co7tipb7mdkmkmj4vmgi7njygfgnxxmdws2cagb5utwqarsejq5l.py # Topologically Sorted Source Nodes: [x, add_1], Original ATen: [aten.add] # Source node to ATen node mapping: # add_1 => add_6 # x => add_3 # Graph fragment: # %add_3 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %permute_3), kwargs = {}) # %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %view_7), kwargs = {}) triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = (xindex // 4) % 4 x2 = (xindex // 16) % 4 x3 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x4), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask) tmp2 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + (x4), xmask) tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + (x4), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, 
primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4, ), (1, )) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y, y_2], Original ATen: [aten.native_layer_norm, aten.clone] stream0 = get_raw_stream(0) triton_per_fused_clone_native_layer_norm_0.run(primals_1, buf3, 4, 64, grid=grid(4), stream=stream0) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_2], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf4) del primals_2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_2, y_3], Original ATen: [aten.add, aten.gelu] triton_poi_fused_add_gelu_1.run(buf4, primals_3, buf5, 256, grid=grid(256), stream=stream0) buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf6) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [x, y_6], Original ATen: [aten.add, aten.native_layer_norm, aten.native_layer_norm_backward] triton_per_fused_add_native_layer_norm_native_layer_norm_backward_2.run(primals_1, buf6, primals_5, buf10, buf15, 4, 64, grid=grid(4), stream=stream0) buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_7], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_7 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [y_8], Original ATen: [aten.gelu] triton_poi_fused_gelu_3.run(buf11, buf12, 256, grid=grid(256), stream=stream0) buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf13) buf14 = reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf13 # reuse # Topologically Sorted Source Nodes: [x, add_1], Original ATen: [aten.add] triton_poi_fused_add_4.run(buf14, primals_1, buf6, primals_5, primals_9, 256, grid=grid(256), stream=stream0) del buf6 del primals_1 del primals_5 del primals_9 return (buf14, primals_3, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0), buf10, buf11, reinterpret_tensor(buf12, (64, 4), (4, 1), 0), primals_8, primals_6, buf15, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 
4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_clone_native_layer_norm_0(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex r2 = rindex % 4 r3 = rindex // 4 % 4 r4 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp17 = tl.load(in_ptr0 + (r2 + 4 * r4 + 16 * r3 + 64 * x0), xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tl.where(xmask, tmp1, 0) tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tl.full([XBLOCK, 1], 64, tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 / tmp9 tmp11 = tmp1 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp15 = tl.where(xmask, tmp13, 0) tmp16 = tl.sum(tmp15, 1)[:, None] tmp18 = tmp17 - tmp10 tmp19 = 64.0 tmp20 = tmp16 / tmp19 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tmp24 = tmp18 * tmp23 tl.store(out_ptr2 + (r1 + 64 * x0), tmp24, xmask) @triton.jit def triton_poi_fused_add_gelu_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.5 tmp4 = tmp2 * tmp3 tmp5 = 0.7071067811865476 tmp6 = tmp2 * tmp5 tmp7 = libdevice.erf(tmp6) tmp8 = 1.0 tmp9 = tmp7 + tmp8 tmp10 = tmp4 * tmp9 tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_per_fused_add_native_layer_norm_native_layer_norm_backward_2(in_ptr0 , in_ptr1, in_ptr2, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl. 
constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r4 = rindex x0 = xindex r1 = rindex % 4 r2 = rindex // 4 % 4 r3 = rindex // 16 tmp0 = tl.load(in_ptr0 + (r4 + 64 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 4 * r3 + 16 * r2 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr2 + r1, None, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]) tl.where(xmask, tmp5, 0) tmp8 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK]) tmp10 = tl.where(xmask, tmp8, 0) tmp11 = tl.sum(tmp10, 1)[:, None] tmp12 = tl.full([XBLOCK, 1], 64, tl.int32) tmp13 = tmp12.to(tl.float32) tmp14 = tmp11 / tmp13 tmp15 = tmp5 - tmp14 tmp16 = tmp15 * tmp15 tmp17 = tl.broadcast_to(tmp16, [XBLOCK, RBLOCK]) tmp19 = tl.where(xmask, tmp17, 0) tmp20 = tl.sum(tmp19, 1)[:, None] tmp21 = tmp4 - tmp14 tmp22 = 64.0 tmp23 = tmp20 / tmp22 tmp24 = 1e-05 tmp25 = tmp23 + tmp24 tmp26 = libdevice.rsqrt(tmp25) tmp27 = tmp21 * tmp26 tmp28 = 0.015625 tmp29 = tmp26 * tmp28 tl.store(out_ptr2 + (r4 + 64 * x0), tmp27, xmask) tl.store(out_ptr3 + x0, tmp29, xmask) @triton.jit def triton_poi_fused_gelu_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex x0 = xindex % 4 x1 = xindex // 4 % 4 x2 = xindex // 16 % 4 x3 = xindex // 64 tmp0 = tl.load(in_ptr0 + x4, xmask) tmp1 = tl.load(in_ptr1 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask) tmp2 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_out_ptr0 + x4, xmask) tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp4 + tmp7 tl.store(in_out_ptr0 + x4, tmp8, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) assert_size_stride(primals_7, (4,), (1,)) assert_size_stride(primals_8, (4, 4), (4, 1)) assert_size_stride(primals_9, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_per_fused_clone_native_layer_norm_0[grid(4)](primals_1, buf3, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf4) del primals_2 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_gelu_1[grid(256)](buf4, 
primals_3, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) buf6 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf5, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf6) buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) buf15 = empty_strided_cuda((4, 1, 1, 1), (1, 1, 1, 1), torch.float32) triton_per_fused_add_native_layer_norm_native_layer_norm_backward_2[ grid(4)](primals_1, buf6, primals_5, buf10, buf15, 4, 64, XBLOCK=1, num_warps=2, num_stages=1) buf11 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11) del primals_7 buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_gelu_3[grid(256)](buf11, buf12, 256, XBLOCK=256, num_warps=4, num_stages=1) buf13 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf12, (64, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf13) buf14 = reinterpret_tensor(buf13, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf13 triton_poi_fused_add_4[grid(256)](buf14, primals_1, buf6, primals_5, primals_9, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf6 del primals_1 del primals_5 del primals_9 return buf14, primals_3, reinterpret_tensor(buf3, (64, 4), (4, 1), 0 ), buf4, reinterpret_tensor(buf5, (64, 4), (4, 1), 0 ), buf10, buf11, reinterpret_tensor(buf12, (64, 4), (4, 1), 0 ), primals_8, primals_6, buf15, primals_4 class MlpBlock(nn.Module): def __init__(self, features, hidden_dim): super().__init__() self.hidden_dim = hidden_dim self.features = features self.fc1 = nn.Linear(self.features, self.hidden_dim) self.fc2 = nn.Linear(self.hidden_dim, int(self.features)) def forward(self, x): y = self.fc1(x) y = F.gelu(y) return self.fc2(y) class MixerBlockNew(nn.Module): """Mixer block layer""" def __init__(self, tokens_dim, tokens_mlp_dim, channels_dim, channels_mlp_dim): super().__init__() self.tokens_mlp_dim = tokens_mlp_dim self.tokens_dim = tokens_dim self.channels_mlp_dim = channels_mlp_dim self.channels_dim = channels_dim self.token_mixer = MlpBlock(self.tokens_dim, self.tokens_mlp_dim) self.channel_mixer = MlpBlock(self.channels_dim, self.channels_mlp_dim) def forward(self, input_0): primals_2 = self.token_mixer.fc1.weight primals_3 = self.token_mixer.fc1.bias primals_4 = self.token_mixer.fc2.weight primals_5 = self.token_mixer.fc2.bias primals_6 = self.channel_mixer.fc1.weight primals_7 = self.channel_mixer.fc1.bias primals_8 = self.channel_mixer.fc2.weight primals_9 = self.channel_mixer.fc2.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0]
repo_name: amayuelas/NNKGReasoning
module_name: MixerBlock
synthetic: false
uuid: 6191
licenses: ["MIT"]
stars: 1
sha: 0e3623b344fd4e3088ece897f898ddbb1f80888d
repo_link: https://github.com/amayuelas/NNKGReasoning/tree/0e3623b344fd4e3088ece897f898ddbb1f80888d
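A minimal smoke test for this record, not from the source repo: it assumes a CUDA device and uses the all-4 dimensions from the repo's get_init_inputs. The second half checks the erf-based GELU identity that triton_poi_fused_gelu_3 hard-codes (0.5 and 0.7071067811865476 = 1/sqrt(2)); the 0.015625 stored by the fused layer-norm kernel is 1/64, the reciprocal of the 64 elements each sample is normalized over.

import torch
import torch.nn.functional as F

block = MixerBlockNew(tokens_dim=4, tokens_mlp_dim=4, channels_dim=4,
    channels_mlp_dim=4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')  # call() is shape-specialized to (4, 4, 4, 4)
y = block(x)
assert y.shape == x.shape  # both residual mixing stages preserve the shape

t = torch.rand(8, device='cuda')
ref = 0.5 * t * (1 + torch.erf(t * 0.7071067811865476))  # gelu(t), erf form
torch.testing.assert_close(F.gelu(t), ref)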
entry_point: Attention
import torch
import torch.nn as nn


class Attention(nn.Module):

    def __init__(self, dim, heads, dropout):
        super().__init__()
        self.heads = heads
        head_dim = dim // heads
        self.scale = head_dim ** -0.5
        self.attn = None
        self.qkv = nn.Linear(dim, dim * 3)
        self.attn_drop = nn.Dropout(dropout)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(dropout)

    @property
    def unwrapped(self):
        return self

    def forward(self, x, mask=None):
        B, N, C = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.heads, C // self.heads
            ).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        attn = q @ k.transpose(-2, -1) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x, attn


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'dim': 4, 'heads': 4, 'dropout': 0.5}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (4 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tmp4 = tmp3 * tmp1 tmp6 = tmp5 * tmp1 tmp7 = triton_helpers.maximum(tmp4, tmp6) tmp9 = tmp8 * tmp1 tmp10 = triton_helpers.maximum(tmp7, tmp9) tmp12 = tmp11 * tmp1 tmp13 = triton_helpers.maximum(tmp10, tmp12) tmp14 = tmp2 - tmp13 tmp15 = tmp14 * tmp1 tmp16 = tl_math.exp(tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def 
triton_poi_fused_clone_4(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (8 + y0 + 12 * x2 + 48 * y1), xmask & ymask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (8 + y0), ymask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask & ymask) @triton.jit def triton_poi_fused_clone_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y0 = yindex % 4 y1 = yindex // 4 y3 = yindex tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (12, 4), (4, 1)) assert_size_stride(primals_3, (12,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((16, 12), (12, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 12), (1, 4), 0), out=buf0) del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 4)](buf0, primals_3, buf1, 16, 4, XBLOCK=2, YBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32) triton_poi_fused_clone_1[grid(16, 4)](buf0, primals_3, buf2, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf1, (16, 4, 1), (4, 1, 0), 0), reinterpret_tensor(buf2, (16, 1, 4), (4, 0, 1), 0), out=buf3) buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused__softmax_2[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf3 triton_poi_fused__softmax_3[grid(256)](buf4, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf4 buf6 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32) triton_poi_fused_clone_4[grid(16, 4)](buf0, primals_3, buf6, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) del buf0 del primals_3 buf7 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (16, 4, 1), (4, 1, 0), 0), out=buf7) 
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) triton_poi_fused_clone_5[grid(16, 4)](buf7, buf8, 16, 4, XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1) buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0) del buf7 extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf9) buf10 = reinterpret_tensor(buf9, (4, 4, 4), (16, 4, 1), 0) del buf9 triton_poi_fused_add_6[grid(64)](buf10, primals_5, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_5 return buf10, buf5, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0 ), buf5, reinterpret_tensor(buf8, (16, 4), (4, 1), 0 ), primals_4, reinterpret_tensor(buf6, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf1, (16, 1, 4), (4, 1, 1), 0 ), reinterpret_tensor(buf2, (16, 4, 1), (4, 1, 4), 0) class AttentionNew(nn.Module): def __init__(self, dim, heads, dropout): super().__init__() self.heads = heads head_dim = dim // heads self.scale = head_dim ** -0.5 self.attn = None self.qkv = nn.Linear(dim, dim * 3) self.attn_drop = nn.Dropout(dropout) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(dropout) @property def unwrapped(self): return self def forward(self, input_0): primals_2 = self.qkv.weight primals_3 = self.qkv.bias primals_4 = self.proj.weight primals_5 = self.proj.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0], output[1]
repo_name: shampooma/segmenter
module_name: Attention
synthetic: false
uuid: 16411
licenses: ["MIT"]
stars: 418
sha: b08fd481da6758e37d108ba28676229b62f757aa
repo_link: https://github.com/shampooma/segmenter/tree/b08fd481da6758e37d108ba28676229b62f757aa
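The two softmax kernels in this record implement the standard numerically stable two-pass scheme; a plain-PyTorch sketch of the same computation, with names of my choosing, is below. Note that with dim=4 and heads=4, head_dim = 1 and scale = head_dim ** -0.5 = 1.0, which is why the first kernel multiplies by the literal 1.0, and that the two Dropout layers are absent from the compiled call, consistent with inference-mode tracing.

import torch

def two_pass_softmax(scores: torch.Tensor) -> torch.Tensor:
    # pass 1 (triton_poi_fused__softmax_2): exponentiate after subtracting the row max
    num = (scores - scores.amax(dim=-1, keepdim=True)).exp()
    # pass 2 (triton_poi_fused__softmax_3): renormalize by the row sum
    return num / num.sum(dim=-1, keepdim=True)

s = torch.rand(4, 4, 4, 4)
torch.testing.assert_close(two_pass_softmax(s), s.softmax(dim=-1))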
entry_point: ConvMlp
import torch
from torch import nn
import torch.cuda


class ConvMlp(nn.Module):
    """ MLP using 1x1 convs that keeps spatial dims """

    def __init__(self, in_features, hidden_features=None, out_features=None,
            act_layer=nn.ReLU, norm_layer=None, drop=0.0):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1,
            bias=True)
        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1,
            bias=True)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.norm(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.cuda assert_size_stride = torch._C._dynamo.guards.assert_size_stride @triton.jit def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, xmask) @triton.jit def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 16 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x3, tmp2, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_2, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_2 buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1)) buf3 = buf2 del buf2 triton_poi_fused_convolution_1[grid(256)](buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_5 return buf3, primals_1, primals_3, primals_4, buf1 class ConvMlpNew(nn.Module): """ MLP using 1x1 convs that keeps spatial dims """ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.0): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True) self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity( ) self.act = act_layer() self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True) self.drop = nn.Dropout(drop) def forward(self, input_0): primals_1 = self.fc1.weight primals_2 = self.fc1.bias primals_4 = self.fc2.weight primals_5 = self.fc2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
repo_name: LoveEachDay/towhee
module_name: ConvMlp
synthetic: false
uuid: 11659
licenses: ["Apache-2.0"]
stars: 0
sha: 513c9c2626676cadaaf0a16ac3c828d96bec91a1
repo_link: https://github.com/LoveEachDay/towhee/tree/513c9c2626676cadaaf0a16ac3c828d96bec91a1
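Why ConvMlp "keeps spatial dims": a kernel_size=1 Conv2d is a per-pixel Linear over the channel dimension, so only C changes while H and W pass through; the compiled graph accordingly leaves both convolutions to extern_kernels.convolution and only fuses the bias add with the ReLU. A small sketch of that equivalence (the 4-in/8-out sizes here are arbitrary, not from the record):

import torch
from torch import nn

conv = nn.Conv2d(4, 8, kernel_size=1, bias=True)
lin = nn.Linear(4, 8)
with torch.no_grad():
    lin.weight.copy_(conv.weight.view(8, 4))  # (out, in, 1, 1) -> (out, in)
    lin.bias.copy_(conv.bias)
x = torch.rand(2, 4, 5, 5)
out_conv = conv(x)
out_lin = lin(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)  # NCHW -> NHWC -> NCHW
torch.testing.assert_close(out_conv, out_lin)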
entry_point: RgbaToBgr
import torch
import torch.nn as nn


def bgr_to_rgb(image: 'torch.Tensor') -> torch.Tensor:
    """Convert a BGR image to RGB.

    Args:
        image (torch.Tensor): BGR Image to be converted to RGB of shape
            :math:`(*,3,H,W)`.

    Returns:
        torch.Tensor: RGB version of the image with shape :math:`(*,3,H,W)`.

    Example:
        >>> input = torch.rand(2, 3, 4, 5)
        >>> output = bgr_to_rgb(input) # 2x3x4x5
    """
    if not isinstance(image, torch.Tensor):
        raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
            type(image)))
    if len(image.shape) < 3 or image.shape[-3] != 3:
        raise ValueError('Input size must have a shape of (*, 3, H, W). Got {}'
            .format(image.shape))
    out: 'torch.Tensor' = image.flip(-3)
    return out


def rgb_to_bgr(image: 'torch.Tensor') -> torch.Tensor:
    """Convert an RGB image to BGR.

    Args:
        image (torch.Tensor): RGB Image to be converted to BGR of shape
            :math:`(*,3,H,W)`.

    Returns:
        torch.Tensor: BGR version of the image with shape :math:`(*,3,H,W)`.

    Example:
        >>> input = torch.rand(2, 3, 4, 5)
        >>> output = rgb_to_bgr(input) # 2x3x4x5
    """
    if not isinstance(image, torch.Tensor):
        raise TypeError('Input type is not a torch.Tensor. Got {}'.format(
            type(image)))
    if len(image.shape) < 3 or image.shape[-3] != 3:
        raise ValueError('Input size must have a shape of (*, 3, H, W). Got {}'
            .format(image.shape))
    return bgr_to_rgb(image)


def rgba_to_rgb(image: 'torch.Tensor') -> torch.Tensor:
    """Convert an image from RGBA to RGB.

    Args:
        image (torch.Tensor): RGBA Image to be converted to RGB of shape
            :math:`(*,4,H,W)`.

    Returns:
        torch.Tensor: RGB version of the image with shape :math:`(*,3,H,W)`.

    Example:
        >>> input = torch.rand(2, 4, 4, 5)
        >>> output = rgba_to_rgb(input) # 2x3x4x5
    """
    if not isinstance(image, torch.Tensor):
        raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
    if len(image.shape) < 3 or image.shape[-3] != 4:
        raise ValueError(
            f'Input size must have a shape of (*, 4, H, W). Got {image.shape}')
    r, g, b, a = torch.chunk(image, image.shape[-3], dim=-3)
    a_one = torch.tensor(1.0) - a
    # The next three expressions are no-ops ((1 - a) * c + a * c == c); their
    # results are discarded, so the compiler eliminates them entirely and no
    # alpha math survives in the fused kernel below.
    a_one * r + a * r
    a_one * g + a * g
    a_one * b + a * b
    return torch.cat([r, g, b], dim=-3)


def rgba_to_bgr(image: 'torch.Tensor') -> torch.Tensor:
    """Convert an image from RGBA to BGR.

    Args:
        image (torch.Tensor): RGBA Image to be converted to BGR of shape
            :math:`(*,4,H,W)`.

    Returns:
        torch.Tensor: BGR version of the image with shape :math:`(*,3,H,W)`.

    Example:
        >>> input = torch.rand(2, 4, 4, 5)
        >>> output = rgba_to_bgr(input) # 2x3x4x5
    """
    if not isinstance(image, torch.Tensor):
        raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}')
    if len(image.shape) < 3 or image.shape[-3] != 4:
        raise ValueError(
            f'Input size must have a shape of (*, 4, H, W). Got {image.shape}')
    x_rgb: 'torch.Tensor' = rgba_to_rgb(image)
    return rgb_to_bgr(x_rgb)


class RgbaToBgr(nn.Module):
    """Convert an image from RGBA to BGR.

    Remove an alpha channel from BGR image.

    Returns:
        torch.Tensor: BGR version of the image.

    Shape:
        - image: :math:`(*, 4, H, W)`
        - output: :math:`(*, 3, H, W)`

    Example:
        >>> input = torch.rand(2, 4, 4, 5)
        >>> rgba = RgbaToBgr()
        >>> output = rgba(input) # 2x3x4x5
    """

    def __init__(self) -> None:
        super(RgbaToBgr, self).__init__()

    def forward(self, image: 'torch.Tensor') -> torch.Tensor:
        return rgba_to_bgr(image)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_cat_flip_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 16 % 3 x0 = xindex % 16 x2 = xindex // 48 x3 = xindex tmp0 = 2 + -1 * x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 3, tl.int64) tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x3, tmp16, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 3, 4, 4), (48, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_flip_0[grid(192)](arg0_1, buf0, 192, XBLOCK= 256, num_warps=4, num_stages=1) del arg0_1 return buf0, def bgr_to_rgb(image: 'torch.Tensor') ->torch.Tensor: """Convert a BGR image to RGB. Args: image (torch.Tensor): BGR Image to be converted to BGR of shape :math:`(*,3,H,W)`. Returns: torch.Tensor: RGB version of the image with shape of shape :math:`(*,3,H,W)`. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = bgr_to_rgb(input) # 2x3x4x5 """ if not isinstance(image, torch.Tensor): raise TypeError('Input type is not a torch.Tensor. Got {}'.format( type(image))) if len(image.shape) < 3 or image.shape[-3] != 3: raise ValueError('Input size must have a shape of (*, 3, H, W).Got {}' .format(image.shape)) out: 'torch.Tensor' = image.flip(-3) return out def rgb_to_bgr(image: 'torch.Tensor') ->torch.Tensor: """Convert a RGB image to BGR. Args: image (torch.Tensor): RGB Image to be converted to BGRof of shape :math:`(*,3,H,W)`. Returns: torch.Tensor: BGR version of the image with shape of shape :math:`(*,3,H,W)`. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_bgr(input) # 2x3x4x5 """ if not isinstance(image, torch.Tensor): raise TypeError('Input type is not a torch.Tensor. Got {}'.format( type(image))) if len(image.shape) < 3 or image.shape[-3] != 3: raise ValueError('Input size must have a shape of (*, 3, H, W).Got {}' .format(image.shape)) return bgr_to_rgb(image) def rgba_to_rgb(image: 'torch.Tensor') ->torch.Tensor: """Convert an image from RGBA to RGB. Args: image (torch.Tensor): RGBA Image to be converted to RGB of shape :math:`(*,4,H,W)`. Returns: torch.Tensor: RGB version of the image with shape :math:`(*,3,H,W)`. Example: >>> input = torch.rand(2, 4, 4, 5) >>> output = rgba_to_rgb(input) # 2x3x4x5 """ if not isinstance(image, torch.Tensor): raise TypeError(f'Input type is not a torch.Tensor. 
Got {type(image)}') if len(image.shape) < 3 or image.shape[-3] != 4: raise ValueError( f'Input size must have a shape of (*, 4, H, W).Got {image.shape}') r, g, b, a = torch.chunk(image, image.shape[-3], dim=-3) a_one = torch.tensor(1.0) - a a_one * r + a * r a_one * g + a * g a_one * b + a * b return torch.cat([r, g, b], dim=-3) def rgba_to_bgr(image: 'torch.Tensor') ->torch.Tensor: """Convert an image from RGBA to BGR. Args: image (torch.Tensor): RGBA Image to be converted to BGR of shape :math:`(*,4,H,W)`. Returns: torch.Tensor: RGB version of the image with shape :math:`(*,3,H,W)`. Example: >>> input = torch.rand(2, 4, 4, 5) >>> output = rgba_to_bgr(input) # 2x3x4x5 """ if not isinstance(image, torch.Tensor): raise TypeError(f'Input type is not a torch.Tensor. Got {type(image)}') if len(image.shape) < 3 or image.shape[-3] != 4: raise ValueError( f'Input size must have a shape of (*, 4, H, W).Got {image.shape}') x_rgb: 'torch.Tensor' = rgba_to_rgb(image) return rgb_to_bgr(x_rgb) class RgbaToBgrNew(nn.Module): """Convert an image from RGBA to BGR. Remove an alpha channel from BGR image. Returns: torch.Tensor: BGR version of the image. Shape: - image: :math:`(*, 4, H, W)` - output: :math:`(*, 3, H, W)` Example: >>> input = torch.rand(2, 4, 4, 5) >>> rgba = RgbaToBgr() >>> output = rgba(input) # 2x3x4x5 """ def __init__(self) ->None: super(RgbaToBgrNew, self).__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
repo_name: NickleDave/kornia
module_name: RgbaToBgr
synthetic: false
uuid: 2683
licenses: ["ECL-2.0", "Apache-2.0"]
stars: 0
sha: 5392651d0bc268da577fa0a49aa50f957289c7dd
repo_link: https://github.com/NickleDave/kornia/tree/5392651d0bc268da577fa0a49aa50f957289c7dd
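The fused kernel gathers channel index 2 + -1*x1, i.e. channels (2, 1, 0), and never reads the alpha channel, so the whole module reduces to a single channel reindex. A check, assuming a CUDA device; note that assert_size_stride specializes the compiled call to inputs of shape (4, 4, 4, 4):

import torch

x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(RgbaToBgrNew()(x), x[..., [2, 1, 0], :, :])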
entry_point: EqualLinearActModule
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/4q/c4qwjg3oiykrck3lufgrneimjnjyzxvb3c3h6obmpachvwty6igj.py # Topologically Sorted Source Nodes: [sqrt, mul_1, weight], Original ATen: [aten.sqrt, aten.mul] # Source node to ATen node mapping: # mul_1 => mul_1 # sqrt => full_default_1 # weight => mul_2 # Graph fragment: # %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.5), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %full_default_1), kwargs = {}) # %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, 1.0), kwargs = {}) triton_poi_fused_mul_sqrt_0 = async_compile.triton('triton_poi_fused_mul_sqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_sqrt_0(in_ptr0, out_ptr0, 
xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + (x0), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_9/inductor_cache/2o/c2oqkq7zaubqmw7vuixxlseb2ff5jzqqbyczicxlmsahuxwdpdyp.py # Topologically Sorted Source Nodes: [mul_3], Original ATen: [aten.mul] # Source node to ATen node mapping: # mul_3 => mul_3 # Graph fragment: # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, 1.0), kwargs = {}) triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + (x0), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sqrt, mul_1, weight], Original ATen: [aten.sqrt, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_mul_sqrt_0.run(primals_2, buf0, 16, grid=grid(16), stream=stream0) del primals_2 buf1 = empty_strided_cuda((4, ), (1, ), torch.float32) # Topologically Sorted Source Nodes: [mul_3], Original ATen: [aten.mul] triton_poi_fused_mul_1.run(primals_3, buf1, 4, grid=grid(4), stream=stream0) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul_3], Original ATen: [aten.mul] extern_kernels.addmm(buf1, primals_1, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 return (buf2, buf0, 
primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from copy import deepcopy import torch.nn as nn from functools import partial from torch.nn.init import _calculate_correct_fan assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_mul_sqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 1.0 tmp4 = tmp2 * tmp3 tl.store(out_ptr0 + x0, tmp4, xmask) @triton.jit def triton_poi_fused_mul_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 4 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 1.0 tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x0, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sqrt_0[grid(16)](primals_2, buf0, 16, XBLOCK= 16, num_warps=1, num_stages=1) del primals_2 buf1 = empty_strided_cuda((4,), (1,), torch.float32) triton_poi_fused_mul_1[grid(4)](primals_3, buf1, 4, XBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.addmm(buf1, primals_1, reinterpret_tensor(buf0, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2) del buf1 return buf2, buf0, primals_1 def equalized_lr(module, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ EqualizedLR.apply(module, name, gain=gain, mode=mode, lr_mul=lr_mul) return module class EqualizedLR: """Equalized Learning Rate. This trick is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation The general idea is to dynamically rescale the weight in training instead of in initializing so that the variance of the responses in each layer is guaranteed with some statistical properties. Note that this function is always combined with a convolution module which is initialized with :math:`\\mathcal{N}(0, 1)`. Args: name (str | optional): The name of weights. Defaults to 'weight'. 
mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. """ def __init__(self, name='weight', gain=2 ** 0.5, mode='fan_in', lr_mul=1.0 ): self.name = name self.mode = mode self.gain = gain self.lr_mul = lr_mul def compute_weight(self, module): """Compute weight with equalized learning rate. Args: module (nn.Module): A module that is wrapped with equalized lr. Returns: torch.Tensor: Updated weight. """ weight = getattr(module, self.name + '_orig') if weight.ndim == 5: fan = _calculate_correct_fan(weight[0], self.mode) else: assert weight.ndim <= 4 fan = _calculate_correct_fan(weight, self.mode) weight = weight * torch.tensor(self.gain, device=weight.device ) * torch.sqrt(torch.tensor(1.0 / fan, device=weight.device) ) * self.lr_mul return weight def __call__(self, module, inputs): """Standard interface for forward pre hooks.""" setattr(module, self.name, self.compute_weight(module)) @staticmethod def apply(module, name, gain=2 ** 0.5, mode='fan_in', lr_mul=1.0): """Apply function. This function is to register an equalized learning rate hook in an ``nn.Module``. Args: module (nn.Module): Module to be wrapped. name (str | optional): The name of weights. Defaults to 'weight'. mode (str, optional): The mode of computing ``fan`` which is the same as ``kaiming_init`` in pytorch. You can choose one from ['fan_in', 'fan_out']. Defaults to 'fan_in'. Returns: nn.Module: Module that is registered with equalized lr hook. """ for _, hook in module._forward_pre_hooks.items(): if isinstance(hook, EqualizedLR): raise RuntimeError( f'Cannot register two equalized_lr hooks on the same parameter {name} in {module} module.' ) fn = EqualizedLR(name, gain=gain, mode=mode, lr_mul=lr_mul) weight = module._parameters[name] delattr(module, name) module.register_parameter(name + '_orig', weight) setattr(module, name, weight.data) module.register_forward_pre_hook(fn) return fn class EqualizedLRLinearModule(nn.Linear): """Equalized LR LinearModule. In this module, we adopt equalized lr in ``nn.Linear``. The equalized learning rate is proposed in: Progressive Growing of GANs for Improved Quality, Stability, and Variation Note that, the initialization of ``self.weight`` will be overwritten as :math:`\\mathcal{N}(0, 1)`. Args: equalized_lr_cfg (dict | None, optional): Config for ``EqualizedLR``. If ``None``, equalized learning rate is ignored. Defaults to dict(mode='fan_in'). """ def __init__(self, *args, equalized_lr_cfg=dict(mode='fan_in'), **kwargs): super().__init__(*args, **kwargs) self.with_equalized_lr = equalized_lr_cfg is not None if self.with_equalized_lr: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if self.with_equalized_lr: equalized_lr(self, **equalized_lr_cfg) self._init_linear_weights() def _init_linear_weights(self): """Initialize linear weights as described in PGGAN.""" nn.init.normal_(self.weight, 0, 1.0 / self.lr_mul) if self.bias is not None: nn.init.constant_(self.bias, 0.0) class EqualLinearActModuleNew(nn.Module): """Equalized LR Linear Module with Activation Layer. This module is modified from ``EqualizedLRLinearModule`` defined in PGGAN. The major features updated in this module is adding support for activation layers used in StyleGAN2. Args: equalized_lr_cfg (dict | None, optional): Config for equalized lr. Defaults to dict(gain=1., lr_mul=1.). bias (bool, optional): Whether to use bias item. Defaults to True. 
bias_init (float, optional): The value for bias initialization. Defaults to ``0.``. act_cfg (dict | None, optional): Config for activation layer. Defaults to None. """ def __init__(self, *args, equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0), bias=True, bias_init=0.0, act_cfg=None, **kwargs): super().__init__() self.with_activation = act_cfg is not None self.linear = EqualizedLRLinearModule(*args, bias=False, equalized_lr_cfg=equalized_lr_cfg, **kwargs) if equalized_lr_cfg is not None: self.lr_mul = equalized_lr_cfg.get('lr_mul', 1.0) else: self.lr_mul = 1.0 if bias: self.bias = nn.Parameter(torch.zeros(self.linear.out_features). fill_(bias_init)) else: self.bias = None if self.with_activation: act_cfg = deepcopy(act_cfg) if act_cfg['type'] == 'fused_bias': self.act_type = act_cfg.pop('type') assert self.bias is not None self.activate = partial(fused_bias_leakyrelu, **act_cfg) else: self.act_type = 'normal' self.activate = build_activation_layer(act_cfg) else: self.act_type = None def forward(self, input_0): primals_3 = self.bias primals_1 = self.linear.weight_orig primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
repo_name: jiangwenj02/mmgeneration
module_name: EqualLinearActModule
synthetic: false
uuid: 12614
licenses: ["Apache-2.0"]
stars: 0
sha: da9ad377ae19260467fc332ddb88f505c38a915a
repo_link: https://github.com/jiangwenj02/mmgeneration/tree/da9ad377ae19260467fc332ddb88f505c38a915a
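Where the literals in the two elementwise kernels come from: the EqualizedLR hook rescales the stored weight on every forward as weight = weight_orig * gain * sqrt(1 / fan) * lr_mul, and this record builds the linear with equalized_lr_cfg=dict(gain=1.0, lr_mul=1.0) on a 4x4 weight, so fan_in = 4. A short derivation:

import math

gain, fan_in, lr_mul = 1.0, 4, 1.0
scale = gain * math.sqrt(1.0 / fan_in)
assert scale == 0.5   # tmp1 in triton_poi_fused_mul_sqrt_0
assert lr_mul == 1.0  # tmp3 there, and tmp1 in triton_poi_fused_mul_1,
                      # which applies the same lr_mul factor to the bias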
entry_point: HighwayNetwork
import torch
import torch.nn as nn
import torch.nn.functional as F


class HighwayNetwork(nn.Module):

    def __init__(self, size):
        super().__init__()
        self.W1 = nn.Linear(size, size)
        self.W2 = nn.Linear(size, size)
        self.W1.bias.data.fill_(0.0)

    def forward(self, x):
        x1 = self.W1(x)
        x2 = self.W2(x)
        g = torch.sigmoid(x2)
        y = g * F.relu(x1) + (1.0 - g) * x
        return y


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_mul_relu_rsub_sigmoid_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp8 = tl.load(in_ptr2 + x0, xmask) tmp1 = tl.sigmoid(tmp0) tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = tmp1 * tmp4 tmp6 = 1.0 tmp7 = tmp6 - tmp1 tmp9 = tmp7 * tmp8 tmp10 = tmp5 + tmp9 tl.store(out_ptr0 + x0, tmp10, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mul_relu_rsub_sigmoid_0[grid(256)](buf1, buf0, primals_3, buf2, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf2, primals_3, buf0, buf1 class HighwayNetworkNew(nn.Module): def __init__(self, size): super().__init__() self.W1 = nn.Linear(size, size) self.W2 = nn.Linear(size, size) self.W1.bias.data.fill_(0.0) def forward(self, input_0): primals_1 = self.W1.weight primals_2 = self.W1.bias primals_4 = self.W2.weight primals_5 = self.W2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
repo_name: Rongjiehuang/Multiband-WaveRNN
module_name: HighwayNetwork
synthetic: false
uuid: 8726
licenses: ["MIT"]
stars: 18
sha: 432e449678220eed841fcb4971415e2e0ac4d9bb
repo_link: https://github.com/Rongjiehuang/Multiband-WaveRNN/tree/432e449678220eed841fcb4971415e2e0ac4d9bb
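The single fused kernel evaluates the highway gate y = sigmoid(x2) * relu(x1) + (1 - sigmoid(x2)) * x in one pass over the two addmm outputs. A minimal equivalence check, assuming a CUDA device (the compiled call is shape-specialized to (4, 4, 4, 4), and both classes expose the same W1/W2 parameters, so their state dicts are interchangeable):

import torch

eager = HighwayNetwork(4).cuda()
fused = HighwayNetworkNew(4).cuda()
fused.load_state_dict(eager.state_dict())  # same weights in both modules
x = torch.rand(4, 4, 4, 4, device='cuda')
with torch.no_grad():
    torch.testing.assert_close(eager(x), fused(x))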
entry_point: AvgPool
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] # Source node to ATen node mapping: # avg_pool2d => avg_pool2d # Graph fragment: # %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [4, 4]), kwargs = {}) triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 
(16*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + (x0), tmp32, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d] stream0 = get_raw_stream(0) triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last' ) tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 + tmp0 tmp4 = tmp3 + tmp2 tmp6 = tmp5 + tmp4 tmp8 = tmp7 + tmp6 tmp10 = tmp9 + tmp8 tmp12 = tmp11 + tmp10 tmp14 = tmp13 + tmp12 tmp16 = tmp15 + tmp14 tmp18 = tmp17 + tmp16 tmp20 = tmp19 + tmp18 tmp22 = tmp21 + tmp20 tmp24 = tmp23 + tmp22 tmp26 = tmp25 + tmp24 tmp28 = tmp27 + tmp26 tmp30 = tmp29 + tmp28 tmp31 = 0.0625 tmp32 = tmp30 * tmp31 tl.store(out_ptr0 + x0, tmp32, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32) get_raw_stream(0) triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del arg0_1 return buf0, class AvgPoolNew(nn.Module): def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
Thagio/kaggle-aptos
AvgPool
false
9543
[ "MIT" ]
0
f565335d34b46b7fa7ca925b7d325397df8e1fee
https://github.com/Thagio/kaggle-aptos/tree/f565335d34b46b7fa7ca925b7d325397df8e1fee
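A minimal verification sketch (not part of the record above; shapes taken from the record's get_inputs): the fused kernel loads 16 contiguous floats per (batch, channel) plane, sums them, and scales by 0.0625 = 1/16, which is exactly F.avg_pool2d over the full 4x4 window.

import torch
import torch.nn.functional as F

x = torch.rand(4, 4, 4, 4)
# Mirror the kernel's math: per-plane sum of 16 contiguous values, scaled by 1/16.
manual = x.reshape(4, 4, 16).sum(dim=-1).view(4, 4, 1, 1) * 0.0625
ref = F.avg_pool2d(x, kernel_size=[4, 4])  # the ATen op the kernel replaces
assert torch.allclose(manual, ref, atol=1e-6)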
RightSVDLayer
import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter class RightSVDLayer(nn.Module): def __init__(self, iw, ow, dropout=None, bias=True): super().__init__() self.weight = Parameter(torch.Tensor(iw, ow)) self.dropout = dropout if bias: self.bias = Parameter(torch.Tensor(ow)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fin, _ = nn.init._calculate_fan_in_and_fan_out(self.weight) bound = 1.0 / math.sqrt(fin / 2.0) nn.init.uniform_(self.bias, -bound, bound) def forward(self, x): y = x.matmul(self.weight) if self.bias is not None: y = y + self.bias if self.dropout is not None: y = F.dropout(y, p=self.dropout) return y def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'iw': 4, 'ow': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import math import torch.nn as nn from torch.nn.parameter import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), primals_2, out=buf0) del primals_2 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_add_0[grid(256)](buf1, primals_3, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 return buf1, reinterpret_tensor(primals_1, (4, 64), (1, 4), 0) class RightSVDLayerNew(nn.Module): def __init__(self, iw, ow, dropout=None, bias=True): super().__init__() self.weight = Parameter(torch.Tensor(iw, ow)) self.dropout = dropout if bias: self.bias = Parameter(torch.Tensor(ow)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fin, _ = nn.init._calculate_fan_in_and_fan_out(self.weight) bound = 1.0 / math.sqrt(fin / 2.0) nn.init.uniform_(self.bias, -bound, bound) def forward(self, input_0): primals_2 = self.weight primals_3 = self.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
collodi/ml_svd
RightSVDLayer
false
1733
[ "MIT" ]
0
67a608ca10d3d37bf861e4e7490e62d298fa83b9
https://github.com/collodi/ml_svd/tree/67a608ca10d3d37bf861e4e7490e62d298fa83b9
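A sketch of the decomposition the compiled call above performs (illustrative only; dropout disabled as in the default init): the 4-D input is reinterpreted as a (64, 4) matrix for one extern mm, then the small Triton kernel adds bias[i % 4] in place before the result is viewed back to the input shape.

import torch

x, weight, bias = torch.rand(4, 4, 4, 4), torch.rand(4, 4), torch.rand(4)
out = x.reshape(64, 4) @ weight    # extern_kernels.mm on the flat view
out = out + bias                   # triton_poi_fused_add_0: one bias value per column
out = out.reshape(4, 4, 4, 4)      # reinterpret_tensor back to (4, 4, 4, 4)
ref = x.matmul(weight) + bias      # eager RightSVDLayer.forward (no dropout)
assert torch.allclose(out, ref, atol=1e-6)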
Conv2dBlock
import torch import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum self.weight = None self.bias = None self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias is not None, 'Please assign weight and bias before calling AdaIN!' b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm(x_reshaped, running_mean, running_var, self. weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-05, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class Conv2dBlock(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlock, self).__init__() self.use_bias = True if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or norm == 'sn': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def forward(self, x): x = self.pad(x) x = self.conv(x) if self.norm: x = self.norm(x) if self.activation: x = self.activation(x) return x def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'input_dim': 4, 'output_dim': 4, 'kernel_size': 4, 'stride': 1}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 1, 1), (4, 1, 1, 1)) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool) get_raw_stream(0) triton_poi_fused_convolution_relu_threshold_backward_0[grid(16)](buf1, primals_3, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf1, primals_1, primals_2, buf2 class AdaptiveInstanceNorm2d(nn.Module): def __init__(self, num_features, eps=1e-05, momentum=0.1): super(AdaptiveInstanceNorm2d, self).__init__() self.num_features = num_features self.eps = eps self.momentum = momentum self.weight = None self.bias = None self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) def forward(self, x): assert self.weight is not None and self.bias is not None, 'Please assign weight and bias before calling AdaIN!' b, c = x.size(0), x.size(1) running_mean = self.running_mean.repeat(b) running_var = self.running_var.repeat(b) x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:]) out = F.batch_norm(x_reshaped, running_mean, running_var, self. 
weight, self.bias, True, self.momentum, self.eps) return out.view(b, c, *x.size()[2:]) def __repr__(self): return self.__class__.__name__ + '(' + str(self.num_features) + ')' class LayerNorm(nn.Module): def __init__(self, num_features, eps=1e-05, affine=True): super(LayerNorm, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps if self.affine: self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_()) self.beta = nn.Parameter(torch.zeros(num_features)) def forward(self, x): shape = [-1] + [1] * (x.dim() - 1) if x.size(0) == 1: mean = x.view(-1).mean().view(*shape) std = x.view(-1).std().view(*shape) else: mean = x.view(x.size(0), -1).mean(1).view(*shape) std = x.view(x.size(0), -1).std(1).view(*shape) x = (x - mean) / (std + self.eps) if self.affine: shape = [1, -1] + [1] * (x.dim() - 2) x = x * self.gamma.view(*shape) + self.beta.view(*shape) return x class Conv2dBlockNew(nn.Module): def __init__(self, input_dim, output_dim, kernel_size, stride, padding= 0, norm='none', activation='relu', pad_type='zero'): super(Conv2dBlockNew, self).__init__() self.use_bias = True if pad_type == 'reflect': self.pad = nn.ReflectionPad2d(padding) elif pad_type == 'replicate': self.pad = nn.ReplicationPad2d(padding) elif pad_type == 'zero': self.pad = nn.ZeroPad2d(padding) else: assert 0, 'Unsupported padding type: {}'.format(pad_type) norm_dim = output_dim if norm == 'bn': self.norm = nn.BatchNorm2d(norm_dim) elif norm == 'in': self.norm = nn.InstanceNorm2d(norm_dim) elif norm == 'ln': self.norm = LayerNorm(norm_dim) elif norm == 'adain': self.norm = AdaptiveInstanceNorm2d(norm_dim) elif norm == 'none' or norm == 'sn': self.norm = None else: assert 0, 'Unsupported normalization: {}'.format(norm) if activation == 'relu': self.activation = nn.ReLU(inplace=True) elif activation == 'lrelu': self.activation = nn.LeakyReLU(0.2, inplace=True) elif activation == 'prelu': self.activation = nn.PReLU() elif activation == 'selu': self.activation = nn.SELU(inplace=True) elif activation == 'tanh': self.activation = nn.Tanh() elif activation == 'none': self.activation = None else: assert 0, 'Unsupported activation: {}'.format(activation) if norm == 'sn': self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) else: self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias) def forward(self, input_0): primals_1 = self.conv.weight primals_3 = self.conv.bias primals_2 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
FUTUREEEEEE/S2R-DepthNet
Conv2dBlock
false
2249
[ "MIT" ]
0
415cc40aef10f9540026ff435d14a9ba9e30ad74
https://github.com/FUTUREEEEEE/S2R-DepthNet/tree/415cc40aef10f9540026ff435d14a9ba9e30ad74
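An illustrative sketch of the conv/bias/ReLU fusion in the optimised record above: the convolution stays an extern kernel (called with bias=None), while one pointwise kernel adds the bias, applies maximum(0, .), and also stores the "output <= 0" mask that autograd's threshold_backward consumes.

import torch
import torch.nn.functional as F

x, w, b = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4), torch.rand(4)
pre = F.conv2d(x, w) + b.view(1, -1, 1, 1)  # extern conv + fused per-channel bias add
act = torch.clamp_min(pre, 0.0)             # fused relu: maximum(0, pre)
mask = act <= 0.0                           # fused: mask saved for threshold_backward
ref = F.relu(F.conv2d(x, w, b))
assert torch.allclose(act, ref, atol=1e-6)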
ParentChildClassifier
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_2/inductor_cache/5s/c5s7q2ybzwodeuorig3awcwebk53edcn54ogeiiskva3k6c4iqxd.py # Topologically Sorted Source Nodes: [encodings], Original ATen: [aten.cat] # Source node to ATen node mapping: # encodings => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %primals_3, %primals_1], 1), kwargs = {}) triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = (xindex // 12) x2 = xindex tmp0 = x0 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = 
tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.load(in_ptr2 + ((4*x1) + ((-8) + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + (x2), tmp16, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/qk/cqk5cnmrffdyh6x4yseboivxypitwp75t6ywqxjlpiff6wcgokiw.py # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] # Source node to ATen node mapping: # relu => relu # Graph fragment: # %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_5), kwargs = {}) # %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {}) triton_poi_fused_relu_1 = async_compile.triton('triton_poi_fused_relu_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + (x2), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_2/inductor_cache/ot/cotlrpakv2v3t26mv5uv43mmk4p5gjca2ac6fnaoq5bvsnejubif.py # Topologically Sorted Source Nodes: [log_probs], Original ATen: [aten._log_softmax] # Source node to ATen node mapping: # log_probs => amax, exp, log, sub, sub_1, sum_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {}) # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args 
= (%addmm_1, %amax), kwargs = {}) # %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {}) # %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {}) triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 2) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tmp1 - tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 - tmp3 tmp8 = tl_math.exp(tmp7) tmp9 = tmp6 + tmp8 tmp10 = tl_math.log(tmp9) tmp11 = tmp4 - tmp10 tl.store(out_ptr0 + (x2), tmp11, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (2, 4), (4, 1)) assert_size_stride(primals_7, (2, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) # Topologically Sorted Source Nodes: [encodings], Original ATen: [aten.cat] stream0 = get_raw_stream(0) triton_poi_fused_cat_0.run(primals_2, primals_3, primals_1, buf0, 48, grid=grid(48), stream=stream0) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # 
Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf1) del primals_4 buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu] triton_poi_fused_relu_1.run(buf2, primals_5, 16, grid=grid(16), stream=stream0) del primals_5 buf3 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm] extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32) # Topologically Sorted Source Nodes: [log_probs], Original ATen: [aten._log_softmax] triton_poi_fused__log_softmax_2.run(buf3, buf4, 8, grid=grid(8), stream=stream0) del buf3 return (buf4, buf0, buf2, buf4, primals_6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 12), (12, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 48 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp9 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tl.full([1], 12, tl.int64) tmp14 = tl.load(in_ptr2 + (4 * x1 + (-8 + x0)), tmp11 & xmask, eviction_policy='evict_last', other=0.0) tmp15 = tl.where(tmp9, tmp10, tmp14) tmp16 = tl.where(tmp4, tmp5, tmp15) tl.store(out_ptr0 + x2, tmp16, xmask) @triton.jit def triton_poi_fused_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x2, tmp4, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 8 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp4 = tmp0 - tmp3 tmp5 = tmp1 - tmp3 tmp6 = tl_math.exp(tmp5) tmp7 = tmp2 - tmp3 tmp8 = tl_math.exp(tmp7) tmp9 = tmp6 + tmp8 tmp10 = tl_math.log(tmp9) tmp11 = tmp4 - tmp10 tl.store(out_ptr0 + x2, tmp11, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, 4), (4, 1)) assert_size_stride(primals_4, (4, 12), (12, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (2, 4), (4, 1)) assert_size_stride(primals_7, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 12), (12, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(48)](primals_2, primals_3, primals_1, buf0, 48, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 del primals_2 del primals_3 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, reinterpret_tensor(primals_4, (12, 4), (1, 12), 0), out=buf1) del primals_4 buf2 = buf1 del buf1 triton_poi_fused_relu_1[grid(16)](buf2, primals_5, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf3 = empty_strided_cuda((4, 2), (2, 1), torch.float32) extern_kernels.addmm(primals_7, buf2, reinterpret_tensor(primals_6, (4, 2), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_7 buf4 = empty_strided_cuda((4, 2), (2, 1), torch.float32) triton_poi_fused__log_softmax_2[grid(8)](buf3, buf4, 8, XBLOCK=8, num_warps=1, num_stages=1) del buf3 return buf4, buf0, buf2, buf4, primals_6 class ParentChildClassifierNew(nn.Module): def __init__(self, parent_dim, child_short_dim, child_full_dim, hidden_dim ): super(ParentChildClassifierNew, self).__init__() if child_full_dim is not None: self.hidden = nn.Linear(parent_dim + child_short_dim + child_full_dim, hidden_dim) else: self.hidden = nn.Linear(parent_dim + child_short_dim, hidden_dim) self.relu = nn.ReLU() self.out = nn.Linear(hidden_dim, 2) self.logsoftmax = nn.LogSoftmax(dim=1) def forward(self, input_0, input_1, input_2): primals_4 = self.hidden.weight primals_5 = self.hidden.bias primals_6 = self.out.weight primals_7 = self.out.bias primals_1 = input_0 primals_2 = input_1 primals_3 = input_2 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
YilunZhou/wikihow-embedding
ParentChildClassifier
false
18135
[ "MIT" ]
8
bfbcaf6aca854cd7e0dedfd5ecf77627138e8425
https://github.com/YilunZhou/wikihow-embedding/tree/bfbcaf6aca854cd7e0dedfd5ecf77627138e8425
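A scalar-loop sketch of the fused cat kernel above (illustrative, not the kernel itself): each flat index splits into (row, column), and the column range selects a predicated load from one of the three inputs, reproducing torch.cat([primals_2, primals_3, primals_1], dim=1).

import torch

a, b, c = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)  # primals_2, _3, _1
out = torch.empty(4, 12)
for i in range(48):                 # xnumel = 48 flat output elements
    col, row = i % 12, i // 12      # x0, x1 in the kernel
    if col < 4:
        out[row, col] = a[row, col]        # first segment (tmp4 predicate)
    elif col < 8:
        out[row, col] = b[row, col - 4]    # second segment (tmp9 predicate)
    else:
        out[row, col] = c[row, col - 8]    # third segment (tmp11 predicate)
assert torch.equal(out, torch.cat([a, b, c], dim=1))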
NextSentencePrediction
import torch import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.distributions class NextSentencePrediction(nn.Module): """ 2-class classification model : is_next, is_not_next """ def __init__(self, hidden): """ :param hidden: BERT model output size """ super().__init__() self.linear = nn.Linear(hidden, 2) self.softmax = nn.LogSoftmax(dim=-1) def forward(self, x): return self.softmax(self.linear(x[:, 0])) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data import torch.utils.data.distributed import torch.distributions assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = xindex // 16 x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask) tl.store(out_ptr0 + x2, tmp0, xmask) @triton.jit def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 2 x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + 0) tmp5 = tl.broadcast_to(tmp4, [XBLOCK]) tmp7 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + 1) tmp9 = tl.broadcast_to(tmp8, [XBLOCK]) tmp2 = tmp0 + tmp1 tmp6 = tmp3 + tmp5 tmp10 = tmp7 + tmp9 tmp11 = triton_helpers.maximum(tmp6, tmp10) tmp12 = tmp2 - tmp11 tl.store(out_ptr0 + x2, tmp12, xmask) @triton.jit def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. 
constexpr): xnumel = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 2 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp6 = tl_math.log(tmp5) tmp7 = tmp0 - tmp6 tl.store(out_ptr0 + x2, tmp7, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4), (4, 1)) assert_size_stride(primals_3, (2,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1) del primals_2 buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32) triton_poi_fused__log_softmax_add_1[grid(32)](buf1, primals_3, buf2, 32, XBLOCK=32, num_warps=1, num_stages=1) del primals_3 buf3 = reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0) del buf1 triton_poi_fused__log_softmax_2[grid(32)](buf2, buf3, 32, XBLOCK=32, num_warps=1, num_stages=1) del buf2 return buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3 class NextSentencePredictionNew(nn.Module): """ 2-class classification model : is_next, is_not_next """ def __init__(self, hidden): """ :param hidden: BERT model output size """ super().__init__() self.linear = nn.Linear(hidden, 2) self.softmax = nn.LogSoftmax(dim=-1) def forward(self, input_0): primals_2 = self.linear.weight primals_3 = self.linear.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
greenstar1151/pytorch-benchmark
NextSentencePrediction
false
10446
[ "BSD-3-Clause" ]
0
8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
https://github.com/greenstar1151/pytorch-benchmark/tree/8b7808d3be6b7ca1d57f1812e35fd2df5e470f8b
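A sketch of the two-pass log-softmax split visible above (math only, not the kernels themselves): pass one fuses the linear bias add with subtraction of the per-row max, and pass two subtracts log(sum(exp(.))) of the shifted values, the standard numerically stable logsumexp decomposition.

import torch

logits, bias = torch.rand(4, 4, 2), torch.rand(2)
z = logits + bias                               # bias add, fused into pass one
shifted = z - z.amax(dim=-1, keepdim=True)      # pass 1: stability shift by the row max
out = shifted - shifted.exp().sum(dim=-1, keepdim=True).log()  # pass 2
assert torch.allclose(out, torch.log_softmax(z, dim=-1), atol=1e-6)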
FeedForward
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/2y/c2yti67f2jfse53hwzelohv44virmhqysai2rurtxxsgeb6u5mnu.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.gelu] # Source node to ATen node mapping: # x_1 => add, erf, mul, mul_1, mul_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) triton_poi_fused_gelu_0 = async_compile.triton('triton_poi_fused_gelu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 16), (16, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.gelu] stream0 = get_raw_stream(0) triton_poi_fused_gelu_0.run(buf0, buf1, 1024, grid=grid(1024), stream=stream0) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf2) del primals_5 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((16, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (16, 4), (4, 1)) assert_size_stride(primals_2, (16,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 16), (16, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 16), (16, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 16), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 16), (256, 64, 16, 1), torch. float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(1024)](buf0, buf1, 1024, XBLOCK=256, num_warps=4, num_stages=1) buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), reinterpret_tensor(primals_4, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf2) del primals_5 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(buf1, (64, 16), (16, 1), 0), primals_4 def exists(val): return val is not None def default(val, d): return val if exists(val) else d class FeedForwardNew(nn.Module): def __init__(self, dim, mult=4, dropout=0.0, activation=None, glu=False): super().__init__() activation = default(activation, nn.GELU) self.glu = glu self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1)) self.act = activation() self.dropout = nn.Dropout(dropout) self.w2 = nn.Linear(dim * mult, dim) def forward(self, input_0): primals_1 = self.w1.weight primals_2 = self.w1.bias primals_4 = self.w2.weight primals_5 = self.w2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
ExpectationMax/Translational-Equivariant-Performers
FeedForward
false
8075
[ "MIT" ]
10
c7a55af3b581426512eb4a57d3a13eb20e93fbd6
https://github.com/ExpectationMax/Translational-Equivariant-Performers/tree/c7a55af3b581426512eb4a57d3a13eb20e93fbd6
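The fused kernel above evaluates the exact (erf-based) GELU, gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2))); the constant 0.7071067811865476 is 1/sqrt(2). A minimal check, assuming PyTorch's default approximate='none' GELU:

import torch
import torch.nn.functional as F

x = torch.randn(64, 16)
y = 0.5 * x * (1.0 + torch.erf(x * 0.7071067811865476))  # the kernel's formula
assert torch.allclose(y, F.gelu(x), atol=1e-6)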
AttentionModule
import torch from torch import nn from torch.nn import functional as F class AttentionModule(nn.Module): """ A neural module that takes a feature map and attention, attends to the features, and produces an attention. Extended Summary ---------------- A :class:`AttentionModule` takes input features and an attention and produces an attention. It multiplicatively combines its input feature map and attention to attend to the relevant region of the feature map. It then processes the attended features via a series of convolutions and produces an attention mask highlighting the objects that possess the attribute the module is looking for. For example, an :class:`AttentionModule` may be tasked with finding cubes. Given an input attention of all ones, it will highlight all the cubes in the provided input features. Given an attention mask highlighting all the red objects, it will produce an attention mask highlighting all the red cubes. Parameters ---------- dim: int The number of channels of each convolutional filter. """ def __init__(self, dim: 'int'): super().__init__() self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1) self.conv3 = nn.Conv2d(dim, 1, kernel_size=1, padding=0) torch.nn.init.kaiming_normal_(self.conv1.weight) torch.nn.init.kaiming_normal_(self.conv2.weight) torch.nn.init.kaiming_normal_(self.conv3.weight) self.dim = dim def forward(self, feats, attn): attended_feats = torch.mul(feats, attn.repeat(1, self.dim, 1, 1)) out = F.relu(self.conv1(attended_feats)) out = F.relu(self.conv2(out)) out = torch.sigmoid(self.conv3(out)) return out def get_inputs(): return [torch.rand([4, 4, 64, 64]), torch.rand([4, 1, 64, 64])] def get_init_inputs(): return [[], {'dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_repeat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x0 = xindex % 4096 x2 = xindex // 16384 tmp0 = tl.load(in_ptr0 + x3, None) tmp1 = tl.load(in_ptr1 + (x0 + 4096 * x2), None, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tl.store(out_ptr0 + x3, tmp2, None) @triton.jit def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = xindex // 4096 % 4 tmp0 = tl.load(in_out_ptr0 + x3, None) tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tl.store(in_out_ptr0 + x3, tmp4, None) @triton.jit def triton_poi_fused_convolution_sigmoid_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex tmp0 = tl.load(in_out_ptr0 + x0, None) tmp1 = tl.load(in_ptr0 + 0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK]) tmp3 = tmp0 + tmp2 tmp4 = tl.sigmoid(tmp3) tl.store(in_out_ptr0 + x0, tmp4, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8) = args args.clear() assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1)) assert_size_stride(primals_2, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_3, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_4, (4,), (1,)) assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_8, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_repeat_0[grid(65536)](primals_2, primals_1, buf0, 65536, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 del primals_2 buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf2 = buf1 del buf1 triton_poi_fused_convolution_relu_1[grid(65536)](buf2, primals_4, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_4 buf3 = extern_kernels.convolution(buf2, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf3, (4, 4, 64, 64), (16384, 4096, 64, 1)) buf4 = buf3 del buf3 triton_poi_fused_convolution_relu_1[grid(65536)](buf4, primals_6, 65536, XBLOCK=512, num_warps=4, num_stages=1) del primals_6 buf5 = extern_kernels.convolution(buf4, primals_7, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf5, (4, 1, 64, 64), (4096, 4096, 64, 1)) buf6 = buf5 del buf5 triton_poi_fused_convolution_sigmoid_2[grid(16384)](buf6, primals_8, 16384, XBLOCK=256, num_warps=4, num_stages=1) del primals_8 return buf6, primals_3, primals_5, primals_7, buf0, buf2, buf4, buf6 class AttentionModuleNew(nn.Module): """ A neural module that takes a feature map and attention, attends to the features, and produces an attention. Extended Summary ---------------- A :class:`AttentionModule` takes input features and an attention and produces an attention. It multiplicatively combines its input feature map and attention to attend to the relevant region of the feature map. It then processes the attended features via a series of convolutions and produces an attention mask highlighting the objects that possess the attribute the module is looking for. For example, an :class:`AttentionModule` may be tasked with finding cubes. Given an input attention of all ones, it will highlight all the cubes in the provided input features. Given an attention mask highlighting all the red objects, it will produce an attention mask highlighting all the red cubes. Parameters ---------- dim: int The number of channels of each convolutional filter. """ def __init__(self, dim: 'int'): super().__init__() self.conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1) self.conv2 = nn.Conv2d(dim, dim, kernel_size=3, padding=1) self.conv3 = nn.Conv2d(dim, 1, kernel_size=1, padding=0) torch.nn.init.kaiming_normal_(self.conv1.weight) torch.nn.init.kaiming_normal_(self.conv2.weight) torch.nn.init.kaiming_normal_(self.conv3.weight) self.dim = dim def forward(self, input_0, input_1): primals_3 = self.conv1.weight primals_4 = self.conv1.bias primals_5 = self.conv2.weight primals_6 = self.conv2.bias primals_7 = self.conv3.weight primals_8 = self.conv3.bias primals_2 = input_0 primals_1 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8]) return output[0]
kdexd/probnmn-clevr
AttentionModule
false
15,795
[ "MIT" ]
69
9c1b2286cf30e9fb045370153c9242a39760e02e
https://github.com/kdexd/probnmn-clevr/tree/9c1b2286cf30e9fb045370153c9242a39760e02e
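For orientation, a minimal eager-mode sketch of what triton_poi_fused_mul_repeat_0 above computes. The mapping is inferred from the kernel's index arithmetic (x0 = index % 4096 picks the spatial position, x2 = index // 16384 picks the batch), so treat it as an illustration rather than part of the record:

import torch

def fused_mul_repeat_reference(features: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:
    # features: (B, C, H, W); attention: (B, 1, H, W).
    # Every channel reads the same attention value, so broadcasting stands in
    # for attention.repeat(1, C, 1, 1) and the repeat is never materialized.
    return features * attention

# Example shapes matching the record: (4, 4, 64, 64) and (4, 1, 64, 64).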
DiceLoss
import collections import torch import warnings from typing import Optional from typing import Union from typing import Any from typing import Callable from typing import Tuple import torch.nn from torch.nn.modules.loss import _Loss from enum import Enum import collections.abc def issequenceiterable(obj: 'Any') ->bool: """ Determine if the object is an iterable sequence and is not a string. """ if torch.is_tensor(obj): return int(obj.dim()) > 0 return isinstance(obj, collections.abc.Iterable) and not isinstance(obj, str) def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]: """ Returns a tuple of `vals`. """ if not issequenceiterable(vals): vals = vals, return tuple(vals) def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any, ...]: """ Returns a copy of `tup` with `dim` values by either shortened or padded with `pad_val` as necessary. """ tup = ensure_tuple(tup) + (pad_val,) * dim return tuple(tup[:dim]) def one_hot(labels: 'torch.Tensor', num_classes: 'int', dtype: 'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor: """ For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]` for `num_classes` N number of classes. Example: For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0. Note that this will include the background label, thus a binary mask should be treated as having 2 classes. """ assert labels.dim() > 0, 'labels should have dim of 1 or more.' if labels.ndim < dim + 1: shape = ensure_tuple_size(labels.shape, dim + 1, 1) labels = labels.reshape(*shape) sh = list(labels.shape) assert sh[dim ] == 1, 'labels should have a channel with length equals to one.' sh[dim] = num_classes o = torch.zeros(size=sh, dtype=dtype, device=labels.device) labels = o.scatter_(dim=dim, index=labels.long(), value=1) return labels class LossReduction(Enum): """ See also: - :py:class:`monai.losses.dice.DiceLoss` - :py:class:`monai.losses.dice.GeneralizedDiceLoss` - :py:class:`monai.losses.focal_loss.FocalLoss` - :py:class:`monai.losses.tversky.TverskyLoss` """ NONE = 'none' MEAN = 'mean' SUM = 'sum' class DiceLoss(_Loss): """ Compute average Dice loss between two tensors. It can support both multi-classes and multi-labels tasks. Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]). Axis N of `input` is expected to have logit predictions for each class rather than being image channels, while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are values added to the intersection and union components of the inter-over-union calculation to smooth results respectively, these values should be small. The `include_background` class attribute can be set to False for an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be background. If the non-background segmentations are small compared to the total image size they can get overwhelmed by the signal from the background so excluding it in such cases helps convergence. Milletari, F. et. al. (2016) V-Net: Fully Convolutional Neural Networks forVolumetric Medical Image Segmentation, 3DV, 2016. """ def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool' =False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act: 'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard: 'bool'=False, reduction: 'Union[LossReduction, str]'=LossReduction. 
MEAN, smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch: 'bool'=False) ->None: """ Args: include_background: if False channel index 0 (background category) is excluded from the calculation. to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False. sigmoid: if True, apply a sigmoid function to the prediction. softmax: if True, apply a softmax function to the prediction. other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute other activation layers, Defaults to ``None``. for example: `other_act = torch.tanh`. squared_pred: use squared versions of targets and predictions in the denominator or not. jaccard: compute Jaccard Index (soft IoU) instead of dice or not. reduction: {``"none"``, ``"mean"``, ``"sum"``} Specifies the reduction to apply to the output. Defaults to ``"mean"``. - ``"none"``: no reduction will be applied. - ``"mean"``: the sum of the output will be divided by the number of elements in the output. - ``"sum"``: the output will be summed. smooth_nr: a small constant added to the numerator to avoid zero. smooth_dr: a small constant added to the denominator to avoid nan. batch: whether to sum the intersection and union areas over the batch dimension before the dividing. Defaults to False, a Dice loss value is computed independently from each item in the batch before any `reduction`. Raises: TypeError: When ``other_act`` is not an ``Optional[Callable]``. ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``]. Incompatible values. """ super().__init__(reduction=LossReduction(reduction).value) if other_act is not None and not callable(other_act): raise TypeError( f'other_act must be None or callable but is {type(other_act).__name__}.' ) if int(sigmoid) + int(softmax) + int(other_act is not None) > 1: raise ValueError( 'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].' ) self.include_background = include_background self.to_onehot_y = to_onehot_y self.sigmoid = sigmoid self.softmax = softmax self.other_act = other_act self.squared_pred = squared_pred self.jaccard = jaccard self.smooth_nr = float(smooth_nr) self.smooth_dr = float(smooth_dr) self.batch = batch def forward(self, input: 'torch.Tensor', target: 'torch.Tensor' ) ->torch.Tensor: """ Args: input: the shape should be BNH[WD]. target: the shape should be BNH[WD]. Raises: ValueError: When ``self.reduction`` is not one of ["mean", "sum", "none"]. """ if self.sigmoid: input = torch.sigmoid(input) n_pred_ch = input.shape[1] if self.softmax: if n_pred_ch == 1: warnings.warn( 'single channel prediction, `softmax=True` ignored.') else: input = torch.softmax(input, 1) if self.other_act is not None: input = self.other_act(input) if self.to_onehot_y: if n_pred_ch == 1: warnings.warn( 'single channel prediction, `to_onehot_y=True` ignored.') else: target = one_hot(target, num_classes=n_pred_ch) if not self.include_background: if n_pred_ch == 1: warnings.warn( 'single channel prediction, `include_background=False` ignored.' 
) else: target = target[:, 1:] input = input[:, 1:] assert target.shape == input.shape, f'ground truth has differing shape ({target.shape}) from input ({input.shape})' reduce_axis = list(range(2, len(input.shape))) if self.batch: reduce_axis = [0] + reduce_axis intersection = torch.sum(target * input, dim=reduce_axis) if self.squared_pred: target = torch.pow(target, 2) input = torch.pow(input, 2) ground_o = torch.sum(target, dim=reduce_axis) pred_o = torch.sum(input, dim=reduce_axis) denominator = ground_o + pred_o if self.jaccard: denominator = 2.0 * (denominator - intersection) f: 'torch.Tensor' = 1.0 - (2.0 * intersection + self.smooth_nr) / ( denominator + self.smooth_dr) if self.reduction == LossReduction.MEAN.value: f = torch.mean(f) elif self.reduction == LossReduction.SUM.value: f = torch.sum(f) elif self.reduction == LossReduction.NONE.value: pass else: raise ValueError( f'Unsupported reduction: {self.reduction}, available options are ["mean", "sum", "none"].' ) return f def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import collections from typing import Optional from typing import Union from typing import Any from typing import Callable from typing import Tuple import torch.nn from torch.nn.modules.loss import _Loss from enum import Enum import collections.abc assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0) tmp1 = tl.load(in_ptr1 + (r1 + 16 * x0), xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tmp7 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp9 = tl.where(xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp13 = tl.where(xmask, tmp11, 0) tmp14 = tl.sum(tmp13, 1)[:, None] tl.store(out_ptr0 + x0, tmp6, xmask) tl.store(out_ptr1 + x0, tmp10, xmask) tl.store(out_ptr2 + x0, tmp14, xmask) @triton.jit def triton_per_fused_add_div_mean_mul_rsub_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp5 = tl.load(in_ptr1 + r0, None) tmp6 = tl.load(in_ptr2 + r0, None) tmp1 = 2.0 tmp2 = tmp0 * tmp1 tmp3 = 1e-05 tmp4 = tmp2 + tmp3 tmp7 = tmp5 + tmp6 tmp8 = tmp7 + tmp3 tmp9 = tmp4 / tmp8 tmp10 = 1.0 tmp11 = tmp10 - tmp9 tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = tl.sum(tmp12, 1)[:, None] tmp15 = 16.0 tmp16 = tmp14 / tmp15 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_mul_sum_0[grid(16)](arg1_1, arg0_1, buf0, buf1, buf2, 16, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_add_div_mean_mul_rsub_1[grid(1)](buf4, buf0, buf1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) del buf0 del buf1 del buf2 return buf4, def issequenceiterable(obj: 'Any') ->bool: """ Determine if the object is an iterable sequence and is not a string. """ if torch.is_tensor(obj): return int(obj.dim()) > 0 return isinstance(obj, collections.abc.Iterable) and not isinstance(obj, str) def ensure_tuple(vals: 'Any') ->Tuple[Any, ...]: """ Returns a tuple of `vals`. 
""" if not issequenceiterable(vals): vals = vals, return tuple(vals) def ensure_tuple_size(tup: 'Any', dim: 'int', pad_val: 'Any'=0) ->Tuple[Any, ...]: """ Returns a copy of `tup` with `dim` values by either shortened or padded with `pad_val` as necessary. """ tup = ensure_tuple(tup) + (pad_val,) * dim return tuple(tup[:dim]) def one_hot(labels: 'torch.Tensor', num_classes: 'int', dtype: 'torch.dtype'=torch.float, dim: 'int'=1) ->torch.Tensor: """ For a tensor `labels` of dimensions B1[spatial_dims], return a tensor of dimensions `BN[spatial_dims]` for `num_classes` N number of classes. Example: For every value v = labels[b,1,h,w], the value in the result at [b,v,h,w] will be 1 and all others 0. Note that this will include the background label, thus a binary mask should be treated as having 2 classes. """ assert labels.dim() > 0, 'labels should have dim of 1 or more.' if labels.ndim < dim + 1: shape = ensure_tuple_size(labels.shape, dim + 1, 1) labels = labels.reshape(*shape) sh = list(labels.shape) assert sh[dim ] == 1, 'labels should have a channel with length equals to one.' sh[dim] = num_classes o = torch.zeros(size=sh, dtype=dtype, device=labels.device) labels = o.scatter_(dim=dim, index=labels.long(), value=1) return labels class LossReduction(Enum): """ See also: - :py:class:`monai.losses.dice.DiceLoss` - :py:class:`monai.losses.dice.GeneralizedDiceLoss` - :py:class:`monai.losses.focal_loss.FocalLoss` - :py:class:`monai.losses.tversky.TverskyLoss` """ NONE = 'none' MEAN = 'mean' SUM = 'sum' class DiceLossNew(_Loss): """ Compute average Dice loss between two tensors. It can support both multi-classes and multi-labels tasks. Input logits `input` (BNHW[D] where N is number of classes) is compared with ground truth `target` (BNHW[D]). Axis N of `input` is expected to have logit predictions for each class rather than being image channels, while the same axis of `target` can be 1 or N (one-hot format). The `smooth_nr` and `smooth_dr` parameters are values added to the intersection and union components of the inter-over-union calculation to smooth results respectively, these values should be small. The `include_background` class attribute can be set to False for an instance of DiceLoss to exclude the first category (channel index 0) which is by convention assumed to be background. If the non-background segmentations are small compared to the total image size they can get overwhelmed by the signal from the background so excluding it in such cases helps convergence. Milletari, F. et. al. (2016) V-Net: Fully Convolutional Neural Networks forVolumetric Medical Image Segmentation, 3DV, 2016. """ def __init__(self, include_background: 'bool'=True, to_onehot_y: 'bool' =False, sigmoid: 'bool'=False, softmax: 'bool'=False, other_act: 'Optional[Callable]'=None, squared_pred: 'bool'=False, jaccard: 'bool'=False, reduction: 'Union[LossReduction, str]'=LossReduction. MEAN, smooth_nr: 'float'=1e-05, smooth_dr: 'float'=1e-05, batch: 'bool'=False) ->None: """ Args: include_background: if False channel index 0 (background category) is excluded from the calculation. to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False. sigmoid: if True, apply a sigmoid function to the prediction. softmax: if True, apply a softmax function to the prediction. other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute other activation layers, Defaults to ``None``. for example: `other_act = torch.tanh`. 
squared_pred: use squared versions of targets and predictions in the denominator or not. jaccard: compute Jaccard Index (soft IoU) instead of dice or not. reduction: {``"none"``, ``"mean"``, ``"sum"``} Specifies the reduction to apply to the output. Defaults to ``"mean"``. - ``"none"``: no reduction will be applied. - ``"mean"``: the sum of the output will be divided by the number of elements in the output. - ``"sum"``: the output will be summed. smooth_nr: a small constant added to the numerator to avoid zero. smooth_dr: a small constant added to the denominator to avoid nan. batch: whether to sum the intersection and union areas over the batch dimension before the dividing. Defaults to False, a Dice loss value is computed independently from each item in the batch before any `reduction`. Raises: TypeError: When ``other_act`` is not an ``Optional[Callable]``. ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``]. Incompatible values. """ super().__init__(reduction=LossReduction(reduction).value) if other_act is not None and not callable(other_act): raise TypeError( f'other_act must be None or callable but is {type(other_act).__name__}.' ) if int(sigmoid) + int(softmax) + int(other_act is not None) > 1: raise ValueError( 'Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].' ) self.include_background = include_background self.to_onehot_y = to_onehot_y self.sigmoid = sigmoid self.softmax = softmax self.other_act = other_act self.squared_pred = squared_pred self.jaccard = jaccard self.smooth_nr = float(smooth_nr) self.smooth_dr = float(smooth_dr) self.batch = batch def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
LucasFidon/MONAI
DiceLoss
false
2,592
[ "Apache-2.0" ]
0
a7ef9d567775dd7a222f93bab08191c0e3532c92
https://github.com/LucasFidon/MONAI/tree/a7ef9d567775dd7a222f93bab08191c0e3532c92
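As a reading aid, a minimal eager sketch (an illustration inferred from the two kernels above, not part of the record) of the reduction they implement for the default DiceLoss settings, one pass for the per-(batch, class) sums and one for the smoothed ratio and "mean" reduction:

import torch

def dice_reference(inp: torch.Tensor, tgt: torch.Tensor, smooth: float = 1e-05) -> torch.Tensor:
    # triton_per_fused_mul_sum_0: sum(tgt * inp), sum(tgt), sum(inp) over the
    # spatial axes, one reduction row per (batch, class) pair (16 rows here).
    inter = (tgt * inp).sum(dim=(2, 3))
    denom = tgt.sum(dim=(2, 3)) + inp.sum(dim=(2, 3))
    # triton_per_fused_add_div_mean_mul_rsub_1: 1 - (2*inter + eps)/(denom + eps),
    # then the sum of the 16 values divided by 16.0 (the "mean" reduction).
    return (1.0 - (2.0 * inter + smooth) / (denom + smooth)).mean()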
Transformer
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.data


class Transformer(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(Transformer, self).__init__()
        self.T_sigma = nn.Linear(in_channels, out_channels)
        self.T_gamma = nn.Linear(in_channels, out_channels)

    def forward(self, sigma, gamma):
        sigma_out = self.T_sigma(sigma)
        gamma_out = self.T_gamma(gamma)
        return F.softplus(sigma_out), F.softplus(gamma_out)


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch import nn
import torch.utils.data

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)
    tl.store(out_ptr0 + x0, tmp5, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_softplus_0[grid(256)](buf0, buf2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_softplus_0[grid(256)](buf1, buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
    return buf2, buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
        ), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1


class TransformerNew(nn.Module):

    def __init__(self, in_channels, out_channels):
        super(TransformerNew, self).__init__()
        self.T_sigma = nn.Linear(in_channels, out_channels)
        self.T_gamma = nn.Linear(in_channels, out_channels)

    def forward(self, input_0, input_1):
        primals_1 = self.T_sigma.weight
        primals_2 = self.T_sigma.bias
        primals_4 = self.T_gamma.weight
        primals_5 = self.T_gamma.bias
        primals_3 = input_0
        primals_6 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6])
        return output[0], output[1]
JunLi-Galios/PGGAN
Transformer
false
13,911
[ "Apache-2.0" ]
58
b8bd3dc44c71a985315fb82070e911378cf210db
https://github.com/JunLi-Galios/PGGAN/tree/b8bd3dc44c71a985315fb82070e911378cf210db
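The fused kernel above applies the numerically stable softplus branch, matching torch.nn.functional.softplus's default threshold of 20: for large inputs exp(x) would overflow float32, but softplus(x) is already x to within rounding there. A minimal sketch of the same branch for reference:

import torch

def stable_softplus(x: torch.Tensor, threshold: float = 20.0) -> torch.Tensor:
    # Mirrors tl.where(x > 20.0, x, log1p(exp(x))) in triton_poi_fused_softplus_0.
    return torch.where(x > threshold, x, torch.log1p(torch.exp(x)))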
ImgSenRanking
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/w5/cw5vvkfcjyk5vbg23gp3mvqvfkh6smcdvibnigzm65mqweiwnzsd.py # Topologically Sorted Source Nodes: [norm, clamp, l2_inp, expand_as], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.expand] # Source node to ATen node mapping: # clamp => clamp_min # expand_as => expand # l2_inp => div # norm => pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_3, 2.0), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%pow_2, 1e-12), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_3, %clamp_min), kwargs = {}) # %expand : [num_users=1] = call_function[target=torch.ops.aten.expand.default](args = (%div, [4, 4, 4, 4]), kwargs = {}) triton_poi_fused_clamp_div_expand_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_clamp_div_expand_linalg_vector_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_expand_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 
'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clamp_div_expand_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x3), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [img_vec], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [sent_vec], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm, clamp, l2_inp, expand_as], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.expand] stream0 = get_raw_stream(0) triton_poi_fused_clamp_div_expand_linalg_vector_norm_0.run(buf1, buf2, 256, grid=grid(256), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [norm_1, clamp_1, l2_inp_1, expand_as_1], Original ATen: [aten.linalg_vector_norm, aten.clamp, aten.div, aten.expand] triton_poi_fused_clamp_div_expand_linalg_vector_norm_0.run(buf0, buf3, 256, grid=grid(256), stream=stream0) return (buf2, buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) 
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import numpy as np import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clamp_div_expand_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x3, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf1) del primals_4 del primals_5 buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_clamp_div_expand_linalg_vector_norm_0[grid(256)](buf1, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_clamp_div_expand_linalg_vector_norm_0[grid(256)](buf0, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf2, buf3, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf0, reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), buf1 def l2norm(input, p=2.0, dim=1, eps=1e-12): """ Compute L2 norm, row-wise """ l2_inp = input / input.norm(p, dim, keepdim=True).clamp(min=eps) return l2_inp.expand_as(input) def xavier_weight(tensor): nin, nout = tensor.size()[0], tensor.size()[1] r = np.sqrt(6.0) / np.sqrt(nin + nout) return tensor.normal_(0, r) class ImgSenRankingNew(torch.nn.Module): def __init__(self, dim_image, sent_dim, hid_dim): super(ImgSenRankingNew, self).__init__() self.register_buffer('device_id', torch.IntTensor(1)) self.linear_img = 
torch.nn.Linear(dim_image, hid_dim) self.linear_sent = torch.nn.Linear(sent_dim, hid_dim) self.init_weights() def init_weights(self): xavier_weight(self.linear_img.weight.data) xavier_weight(self.linear_sent.weight.data) self.linear_img.bias.data.fill_(0) self.linear_sent.bias.data.fill_(0) def forward(self, input_0, input_1): primals_1 = self.linear_img.weight primals_2 = self.linear_img.bias primals_4 = self.linear_sent.weight primals_5 = self.linear_sent.bias primals_3 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0], output[1]
ypxie/HDGan
ImgSenRanking
false
16,770
[ "MIT" ]
160
d98e2a85f7ae6ce7bfacd1c15e519558d97cb931
https://github.com/ypxie/HDGan/tree/d98e2a85f7ae6ce7bfacd1c15e519558d97cb931
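A minimal sketch (an illustration, not part of the record) of the normalization that triton_poi_fused_clamp_div_expand_linalg_vector_norm_0 performs: the four strided loads are the dim-1 elements of each fiber, whose squared sum is square-rooted, clamped at 1e-12, and divided out, exactly as in the l2norm helper above:

import torch

def l2norm_reference(x: torch.Tensor, dim: int = 1, eps: float = 1e-12) -> torch.Tensor:
    # clamp(min=eps) before the divide guards against zero-norm fibers, as
    # triton_helpers.maximum(sqrt(sum(x*x)), 1e-12) does in the kernel.
    return x / x.norm(2, dim, keepdim=True).clamp(min=eps)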
SAP
import torch
import torch.nn as nn


class SelfAttentionPooling(nn.Module):
    """
    Implementation of SelfAttentionPooling
    Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition
    https://arxiv.org/pdf/2008.01077v1.pdf
    """

    def __init__(self, input_dim):
        super(SelfAttentionPooling, self).__init__()
        self.W = nn.Linear(input_dim, 1)
        self.softmax = nn.functional.softmax

    def forward(self, batch_rep, att_mask):
        """
        input:
            batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension
            attention_weight:
            att_w : size (N, T, 1)
        return:
            utter_rep: size (N, H)
        """
        batch_rep.shape[1]
        att_logits = self.W(batch_rep).squeeze(-1)
        att_logits = att_mask + att_logits
        att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1)
        utter_rep = torch.sum(batch_rep * att_w, dim=1)
        return utter_rep


class SAP(nn.Module):
    """ Self Attention Pooling module incorporating an attention mask"""

    def __init__(self, out_dim):
        super(SAP, self).__init__()
        self.act_fn = nn.Tanh()
        self.sap_layer = SelfAttentionPooling(out_dim)

    def forward(self, feature, att_mask):
        """
        Arguments
            feature - [BxTxD]   Acoustic feature
            att_mask - [BxTx1]  Attention mask logits
        """
        feature = self.act_fn(feature)
        sap_vec = self.sap_layer(feature, att_mask)
        return sap_vec


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'out_dim': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = libdevice.tanh(tmp0) tl.store(out_ptr0 + x0, tmp1, xmask) @triton.jit def triton_poi_fused__softmax_add_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 16 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp6 = triton_helpers.maximum(tmp2, tmp5) tmp9 = tmp7 + tmp8 tmp10 = triton_helpers.maximum(tmp6, tmp9) tmp13 = tmp11 + tmp12 tmp14 = triton_helpers.maximum(tmp10, tmp13) tmp15 = tmp2 - tmp14 tmp16 = tl_math.exp(tmp15) tmp17 = tmp5 - tmp14 tmp18 = tl_math.exp(tmp17) tmp19 = tmp16 + tmp18 tmp20 = tmp9 - tmp14 tmp21 = tl_math.exp(tmp20) tmp22 = tmp19 + tmp21 tmp23 = tmp13 - tmp14 tmp24 = tl_math.exp(tmp23) tmp25 = tmp22 + tmp24 tl.store(out_ptr0 + x2, tmp14, xmask) tl.store(out_ptr1 + x2, tmp25, xmask) @triton.jit def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 64 x3 = xindex // 64 x5 = xindex // 4 % 16 x2 = xindex // 16 % 4 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tl.load(in_ptr2 + x5, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr3 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr4 + (x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp10 = tl.load(in_ptr0 + (64 + x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr2 + (16 + x5), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr3 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp17 = tl.load(in_ptr4 + (4 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp21 = tl.load(in_ptr0 + (128 + x4), xmask, eviction_policy='evict_last') tmp22 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, 
eviction_policy= 'evict_last') tmp23 = tl.load(in_ptr2 + (32 + x5), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr3 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp28 = tl.load(in_ptr4 + (8 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp32 = tl.load(in_ptr0 + (192 + x4), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp34 = tl.load(in_ptr2 + (48 + x5), xmask, eviction_policy='evict_last') tmp36 = tl.load(in_ptr3 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp39 = tl.load(in_ptr4 + (12 + x2 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 - tmp4 tmp6 = tl_math.exp(tmp5) tmp8 = tmp6 / tmp7 tmp9 = tmp0 * tmp8 tmp13 = tmp11 + tmp12 tmp15 = tmp13 - tmp14 tmp16 = tl_math.exp(tmp15) tmp18 = tmp16 / tmp17 tmp19 = tmp10 * tmp18 tmp20 = tmp9 + tmp19 tmp24 = tmp22 + tmp23 tmp26 = tmp24 - tmp25 tmp27 = tl_math.exp(tmp26) tmp29 = tmp27 / tmp28 tmp30 = tmp21 * tmp29 tmp31 = tmp20 + tmp30 tmp35 = tmp33 + tmp34 tmp37 = tmp35 - tmp36 tmp38 = tl_math.exp(tmp37) tmp40 = tmp38 / tmp39 tmp41 = tmp32 * tmp40 tmp42 = tmp31 + tmp41 tl.store(out_ptr0 + x7, tmp42, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, 4), (4, 1)) assert_size_stride(primals_3, (1,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_tanh_0[grid(256)](primals_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del primals_1 buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), ( 4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 4), 0), alpha=1, beta=1, out=buf2) del primals_2 del primals_3 buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf4 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) triton_poi_fused__softmax_add_1[grid(64)](primals_4, buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_mul_sum_2[grid(256)](buf0, primals_4, buf2, buf3, buf4, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del buf3 del buf4 return buf5, primals_4, buf0, buf2 class SelfAttentionPooling(nn.Module): """ Implementation of SelfAttentionPooling Original Paper: Self-Attention Encoding and Pooling for Speaker Recognition https://arxiv.org/pdf/2008.01077v1.pdf """ def __init__(self, input_dim): super(SelfAttentionPooling, self).__init__() self.W = nn.Linear(input_dim, 1) self.softmax = nn.functional.softmax def forward(self, batch_rep, att_mask): """ input: batch_rep : size (N, T, H), N: batch size, T: sequence length, H: Hidden dimension attention_weight: att_w : size (N, T, 1) return: utter_rep: size (N, H) """ batch_rep.shape[1] att_logits = self.W(batch_rep).squeeze(-1) att_logits = att_mask + att_logits att_w = self.softmax(att_logits, dim=-1).unsqueeze(-1) utter_rep = torch.sum(batch_rep * att_w, dim=1) return utter_rep class SAPNew(nn.Module): """ Self Attention Pooling module incoporate attention mask""" def __init__(self, out_dim): super(SAPNew, self).__init__() self.act_fn = nn.Tanh() self.sap_layer = SelfAttentionPooling(out_dim) def forward(self, input_0, input_1): primals_2 
= self.sap_layer.W.weight primals_3 = self.sap_layer.W.bias primals_1 = input_0 primals_4 = input_1 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
B06901052/s3prl
SAP
false
123
[ "MIT" ]
0
5f63d2df043d2d7c81580cd042fa2cea34746f48
https://github.com/B06901052/s3prl/tree/5f63d2df043d2d7c81580cd042fa2cea34746f48
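For reference, the eager computation that the fused kernels above jointly implement; this restates the module's own forward, and the kernel-to-step mapping in the comments is my reading of the generated code:

import torch

def sap_reference(feature, att_mask, W):
    x = torch.tanh(feature)                   # triton_poi_fused_tanh_0
    logits = att_mask + W(x).squeeze(-1)      # extern addmm, add folded into the softmax pass
    # triton_poi_fused__softmax_add_1 precomputes the row max and exp-sum;
    # triton_poi_fused_mul_sum_2 renormalizes on the fly while accumulating.
    att_w = torch.softmax(logits, dim=-1).unsqueeze(-1)
    return torch.sum(x * att_w, dim=1)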
NormedLinear
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.nn.functional as F
from torch.nn import Parameter


class NormedLinear(nn.Module):

    def __init__(self, in_features, out_features):
        super(NormedLinear, self).__init__()
        self.weight = Parameter(torch.Tensor(in_features, out_features))
        self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0)

    def forward(self, x):
        out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
        return out


def get_inputs():
    return [torch.rand([4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.parallel import torch.optim import torch.utils.data from torch.nn import Parameter assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) @triton.jit def triton_poi_fused_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-12 tmp14 = triton_helpers.maximum(tmp12, tmp13) tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_div_0[grid(16)](primals_1, buf0, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_1 buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_poi_fused_div_1[grid(16)](primals_2, buf1, 16, XBLOCK=16, num_warps=1, num_stages=1) buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(buf0, buf1, out=buf2) del buf1 return buf2, primals_2, reinterpret_tensor(buf0, (4, 4), (1, 4), 0) class NormedLinearNew(nn.Module): def __init__(self, in_features, out_features): super(NormedLinearNew, self).__init__() self.weight = Parameter(torch.Tensor(in_features, out_features)) self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-05).mul_(100000.0) def forward(self, input_0): primals_1 = self.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
dixit-dude7/LDAM-DRW
NormedLinear
false
12,285
[ "MIT" ]
0
6366f4756d3ac0c6b6db784b7f20e16066967ed4
https://github.com/dixit-dude7/LDAM-DRW/tree/6366f4756d3ac0c6b6db784b7f20e16066967ed4
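A short note on the two div kernels above: both implement the same clamped L2 normalization, specialized by their index arithmetic to a row-wise and a column-wise reduction, feeding the extern mm. A sketch of the eager equivalent (the module's own forward) for reference:

import torch
import torch.nn.functional as F

def normed_linear_reference(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    # F.normalize uses the same eps=1e-12 clamp that appears in both kernels.
    return F.normalize(x, dim=1).mm(F.normalize(weight, dim=0))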
AFMLayer
import itertools
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import *


class AFMLayer(nn.Module):
    """Attentional Factorization Machine models pairwise (order-2) feature
    interactions without linear term and bias.
      Input shape
        - A list of 3D tensor with shape: ``(batch_size,1,embedding_size)``.
      Output shape
        - 2D tensor with shape: ``(batch_size, 1)``.
      Arguments
        - **in_features** : Positive integer, dimensionality of input features.
        - **attention_factor** : Positive integer, dimensionality of the
         attention network output space.
        - **l2_reg_w** : float between 0 and 1. L2 regularizer strength
         applied to attention network.
        - **dropout_rate** : float in [0,1). Fraction of the attention net
         output units to dropout.
        - **seed** : A Python integer to use as random seed.
      References
        - [Attentional Factorization Machines : Learning the Weight of Feature
        Interactions via Attention Networks](https://arxiv.org/pdf/1708.04617.pdf)
    """

    def __init__(self, in_features, attention_factor=4, l2_reg_w=0,
            dropout_rate=0, seed=1024, device='cpu'):
        super(AFMLayer, self).__init__()
        self.attention_factor = attention_factor
        self.l2_reg_w = l2_reg_w
        self.dropout_rate = dropout_rate
        self.seed = seed
        embedding_size = in_features
        self.attention_W = nn.Parameter(torch.Tensor(embedding_size, self.
            attention_factor))
        self.attention_b = nn.Parameter(torch.Tensor(self.attention_factor))
        self.projection_h = nn.Parameter(torch.Tensor(self.attention_factor, 1))
        self.projection_p = nn.Parameter(torch.Tensor(embedding_size, 1))
        for tensor in [self.attention_W, self.projection_h, self.projection_p]:
            nn.init.xavier_normal_(tensor)
        self.dropout = nn.Dropout(dropout_rate)
        self

    def forward(self, inputs):
        embeds_vec_list = inputs
        row = []
        col = []
        for r, c in itertools.combinations(embeds_vec_list, 2):
            row.append(r)
            col.append(c)
        p = torch.cat(row, dim=1)
        q = torch.cat(col, dim=1)
        inner_product = p * q
        bi_interaction = inner_product
        attention_temp = F.relu(torch.tensordot(bi_interaction, self.
            attention_W, dims=([-1], [0])) + self.attention_b)
        self.normalized_att_score = F.softmax(torch.tensordot(
            attention_temp, self.projection_h, dims=([-1], [0])), dim=1)
        attention_output = torch.sum(self.normalized_att_score *
            bi_interaction, dim=1)
        attention_output = self.dropout(attention_output)
        afm_out = torch.tensordot(attention_output, self.projection_p,
            dims=([-1], [0]))
        return afm_out


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn from sklearn.metrics import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 % 24 x0 = xindex % 4 x2 = xindex // 96 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 8, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr0 + (x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask, other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 12, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr0 + (x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask, other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 16, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr0 + (64 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 & xmask, other=0.0) tmp21 = tmp0 >= tmp17 tmp22 = tl.full([1], 20, tl.int64) tmp23 = tmp0 < tmp22 tmp24 = tmp21 & tmp23 tmp25 = tl.load(in_ptr0 + (64 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 & xmask, other=0.0) tmp26 = tmp0 >= tmp22 tl.full([1], 24, tl.int64) tmp29 = tl.load(in_ptr0 + (128 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 & xmask, other=0.0) tmp30 = tl.where(tmp24, tmp25, tmp29) tmp31 = tl.where(tmp19, tmp20, tmp30) tmp32 = tl.where(tmp14, tmp15, tmp31) tmp33 = tl.where(tmp9, tmp10, tmp32) tmp34 = tl.where(tmp4, tmp5, tmp33) tmp35 = tl.load(in_ptr0 + (64 + x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0) tmp36 = tl.load(in_ptr0 + (128 + x0 + 4 * (-4 + x1) + 16 * x2), tmp9 & xmask, other=0.0) tmp37 = tl.load(in_ptr0 + (192 + x0 + 4 * (-8 + x1) + 16 * x2), tmp14 & xmask, other=0.0) tmp38 = tl.load(in_ptr0 + (128 + x0 + 4 * (-12 + x1) + 16 * x2), tmp19 & xmask, other=0.0) tmp39 = tl.load(in_ptr0 + (192 + x0 + 4 * (-16 + x1) + 16 * x2), tmp24 & xmask, other=0.0) tmp40 = tl.load(in_ptr0 + (192 + x0 + 4 * (-20 + x1) + 16 * x2), tmp26 & xmask, other=0.0) tmp41 = tl.where(tmp24, tmp39, tmp40) tmp42 = tl.where(tmp19, tmp38, tmp41) tmp43 = tl.where(tmp14, tmp37, tmp42) tmp44 = tl.where(tmp9, tmp36, tmp43) tmp45 = tl.where(tmp4, tmp35, tmp44) tmp46 = tmp34 * tmp45 tl.store(in_out_ptr0 + x3, tmp46, xmask) @triton.jit def triton_poi_fused_add_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 384 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_per_fused__softmax_2(in_ptr0, out_ptr2, xnumel, rnumel, 
XBLOCK: tl.constexpr): xnumel = 4 rnumel = 24 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 24 * x0), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, float('-inf')) tmp4 = triton_helpers.max2(tmp3, 1)[:, None] tmp5 = tmp0 - tmp4 tmp6 = tl_math.exp(tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = tl.where(rmask & xmask, tmp7, 0) tmp10 = tl.sum(tmp9, 1)[:, None] tmp11 = tmp6 / tmp10 tl.store(out_ptr2 + (r1 + 24 * x0), tmp11, rmask & xmask) @triton.jit def triton_per_fused_mul_sum_3(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 16 rnumel = 24 RBLOCK: tl.constexpr = 32 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r2 = rindex x1 = xindex // 4 x0 = xindex % 4 x3 = xindex tmp0 = tl.load(in_ptr0 + (r2 + 24 * x1), rmask & xmask, eviction_policy ='evict_last', other=0.0) tmp1 = tl.load(in_ptr1 + (x0 + 4 * r2 + 96 * x1), rmask & xmask, other=0.0) tmp2 = tmp0 * tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.where(rmask & xmask, tmp3, 0) tmp6 = tl.sum(tmp5, 1)[:, None] tl.store(out_ptr0 + x3, tmp6, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 1), (1, 1)) assert_size_stride(primals_5, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.float32) buf2 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_cat_mul_0[grid(384)](buf2, primals_1, 384, XBLOCK= 128, num_warps=4, num_stages=1) del primals_1 buf3 = empty_strided_cuda((96, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (96, 4), (4, 1), 0), primals_2, out=buf3) del primals_2 buf4 = reinterpret_tensor(buf3, (4, 24, 4), (96, 4, 1), 0) del buf3 buf11 = empty_strided_cuda((4, 24, 4), (96, 4, 1), torch.bool) triton_poi_fused_add_relu_threshold_backward_1[grid(384)](buf4, primals_3, buf11, 384, XBLOCK=256, num_warps=4, num_stages=1) del primals_3 buf5 = empty_strided_cuda((96, 1), (1, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf4, (96, 4), (4, 1), 0), primals_4, out=buf5) buf8 = empty_strided_cuda((4, 24, 1), (24, 1, 1), torch.float32) triton_per_fused__softmax_2[grid(4)](buf5, buf8, 4, 24, XBLOCK=1, num_warps=2, num_stages=1) del buf5 buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32) triton_per_fused_mul_sum_3[grid(16)](buf8, buf2, buf9, 16, 24, XBLOCK=1, num_warps=2, num_stages=1) buf10 = empty_strided_cuda((4, 1), (1, 1), torch.float32) extern_kernels.mm(buf9, primals_5, out=buf10) return buf10, buf8, buf2, buf8, reinterpret_tensor(buf9, (4, 4), (1, 4), 0 ), reinterpret_tensor(primals_5, (1, 4), (1, 1), 0 ), reinterpret_tensor(buf4, (4, 96), (1, 4), 0), reinterpret_tensor( primals_4, (1, 4), (1, 1), 0), buf11 class AFMLayerNew(nn.Module): """Attentonal Factorization Machine models pairwise (order-2) feature interactions without linear term and bias. Input shape - A list of 3D tensor with shape: ``(batch_size,1,embedding_size)``. 
Output shape - 2D tensor with shape: ``(batch_size, 1)``. Arguments - **in_features** : Positive integer, dimensionality of input features. - **attention_factor** : Positive integer, dimensionality of the attention network output space. - **l2_reg_w** : float between 0 and 1. L2 regularizer strength applied to attention network. - **dropout_rate** : float between in [0,1). Fraction of the attention net output units to dropout. - **seed** : A Python integer to use as random seed. References - [Attentional Factorization Machines : Learning the Weight of Feature Interactions via Attention Networks](https://arxiv.org/pdf/1708.04617.pdf) """ def __init__(self, in_features, attention_factor=4, l2_reg_w=0, dropout_rate=0, seed=1024, device='cpu'): super(AFMLayerNew, self).__init__() self.attention_factor = attention_factor self.l2_reg_w = l2_reg_w self.dropout_rate = dropout_rate self.seed = seed embedding_size = in_features self.attention_W = nn.Parameter(torch.Tensor(embedding_size, self. attention_factor)) self.attention_b = nn.Parameter(torch.Tensor(self.attention_factor)) self.projection_h = nn.Parameter(torch.Tensor(self.attention_factor, 1) ) self.projection_p = nn.Parameter(torch.Tensor(embedding_size, 1)) for tensor in [self.attention_W, self.projection_h, self.projection_p]: nn.init.xavier_normal_(tensor) self.dropout = nn.Dropout(dropout_rate) self def forward(self, input_0): primals_2 = self.attention_W primals_3 = self.attention_b primals_4 = self.projection_h primals_5 = self.projection_p primals_1 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
chenkkkk/DeepCTR-PyTorch
AFMLayer
false
6,475
[ "Apache-2.0" ]
1
a10a3ace4ad79171e7fb182407b3e4d22bf753e7
https://github.com/chenkkkk/DeepCTR-PyTorch/tree/a10a3ace4ad79171e7fb182407b3e4d22bf753e7
WordAttentionPool
from _paritybench_helpers import _mock_config import torch import torch.nn as nn class WordAttentionPool(nn.Module): def __init__(self, cfg): super(WordAttentionPool, self).__init__() input_size = cfg.INPUT_SIZE hidden_size = cfg.HIDDEN_SIZE self.stride = cfg.STRIDE self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1) self.text_linear = nn.Linear(hidden_size, hidden_size) def forward(self, visual_input, text_feature): _, _, v_len = visual_input.shape vis_att = torch.relu(self.vis_conv(visual_input)) text_att = torch.relu(self.text_linear(text_feature)) att = torch.matmul(text_att.unsqueeze(1), vis_att).transpose(1, 2) seg_list = [] for i in range(v_len // self.stride): vis_seg = visual_input[:, :, self.stride * i:self.stride * (i + 1) ].transpose(1, 2) att_seg = torch.softmax(att[:, self.stride * i:self.stride * (i + 1), :], dim=1) vis_new = torch.sum(vis_seg * att_seg, dim=1) seg_list.append(vis_new) vis_out = torch.relu(self.vis_conv(torch.stack(seg_list, dim=2))) return vis_out def get_inputs(): return [torch.rand([4, 4, 4]), torch.rand([4, 4])] def get_init_inputs(): return [[], {'cfg': _mock_config(INPUT_SIZE=4, HIDDEN_SIZE=4, STRIDE=4)}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x2, tmp4, xmask) tl.store(out_ptr0 + x2, tmp6, xmask) @triton.jit def triton_poi_fused_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.full([1], 0, tl.int32) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp5 = 0.0 tmp6 = tmp4 <= tmp5 tl.store(in_out_ptr0 + x3, tmp4, xmask) tl.store(out_ptr0 + x3, tmp6, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_mul_sum_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + 4 
* x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last' ) tmp12 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last' ) tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x2, tmp14, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4), (4, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=( 0,), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 4), (16, 4, 1)) buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32) extern_kernels.mm(primals_6, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1) del primals_4 buf2 = buf1 del buf1 buf11 = empty_strided_cuda((4, 4), (4, 1), torch.bool) get_raw_stream(0) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf2, primals_5, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_5 buf3 = buf0 del buf0 buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool) triton_poi_fused_convolution_relu_threshold_backward_1[grid(64)](buf3, primals_3, buf12, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((4, 1, 4), (4, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(buf2, (4, 1, 4), (4, 0, 1), 0 ), buf3, out=buf4) buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_2[grid(16)](buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32) triton_poi_fused__softmax_3[grid(16)](buf5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1) buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0) del buf5 triton_poi_fused__softmax_mul_sum_4[grid(16)](primals_1, buf6, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1) del buf6 buf8 = extern_kernels.convolution(reinterpret_tensor(buf7, (4, 4, 1 ), (4, 1, 0), 0), primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf8, (4, 4, 1), (4, 1, 1)) buf9 = buf8 del buf8 buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.bool) triton_poi_fused_relu_threshold_backward_0[grid(16)](buf9, primals_3, buf10, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_3 return buf9, primals_1, primals_2, primals_6, buf4, reinterpret_tensor(buf7 , (4, 4, 1), (4, 1, 1), 0), buf10, reinterpret_tensor(buf2, (4, 4, 1), (4, 1, 4), 0), reinterpret_tensor(buf3, (4, 4, 4), (16, 1, 4), 0 ), buf11, buf12 class WordAttentionPoolNew(nn.Module): def __init__(self, cfg): super(WordAttentionPoolNew, self).__init__() input_size = cfg.INPUT_SIZE hidden_size = cfg.HIDDEN_SIZE self.stride = cfg.STRIDE self.vis_conv = nn.Conv1d(input_size, hidden_size, 1, 1) self.text_linear = nn.Linear(hidden_size, 
hidden_size) def forward(self, input_0, input_1): primals_2 = self.vis_conv.weight primals_3 = self.vis_conv.bias primals_4 = self.text_linear.weight primals_5 = self.text_linear.bias primals_1 = input_0 primals_6 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6]) return output[0]
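# Hypothetical smoke test (not part of the record): compares the compiled
# module above with the eager WordAttentionPool it was traced from, assuming
# both classes are in scope. A plain namespace stands in for the paritybench
# mock config; call() pins buffers to cuda:0, so this needs a CUDA device.
import types


def _word_attention_pool_smoke_test():
    cfg = types.SimpleNamespace(INPUT_SIZE=4, HIDDEN_SIZE=4, STRIDE=4)
    eager = WordAttentionPool(cfg).cuda()
    compiled = WordAttentionPoolNew(cfg).cuda()
    compiled.load_state_dict(eager.state_dict())  # share the random weights
    visual = torch.rand(4, 4, 4, device='cuda')
    text = torch.rand(4, 4, device='cuda')
    torch.testing.assert_close(compiled(visual, text), eager(visual, text))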
CFM-MSG/Code_LEORN
WordAttentionPool
false
6,030
[ "MIT" ]
1
fabea1e1ded973a4db692e51e2df442bde55f626
https://github.com/CFM-MSG/Code_LEORN/tree/fabea1e1ded973a4db692e51e2df442bde55f626
CrossNet
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/52/c52kaji6eyiltsqxp7w6aivnhxv5zwp3fyswckudklooo5a6gore.py # Topologically Sorted Source Nodes: [xl_w], Original ATen: [aten.clone, aten._unsafe_view] # Source node to ATen node mapping: # xl_w => clone, view # Graph fragment: # %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format}) # %view : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [64, 4]), kwargs = {}) triton_poi_fused__unsafe_view_clone_0 = async_compile.triton('triton_poi_fused__unsafe_view_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64, 4], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_view_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = 
yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + ((16*x1) + (64*(y0 // 16)) + (y0 % 16)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/c5/cc5niyk56fa3m2oc45eqgnf3eeposiu56eefgo6q2ozqlq3nwxnc.py # Topologically Sorted Source Nodes: [dot_], Original ATen: [aten.clone] # Source node to ATen node mapping: # dot_ => clone_1 # Graph fragment: # %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/p5/cp5pohlvud22ry7jg7rk2cbrxg2zgd5m6ftrmu3zisebczw32ykt.py # Topologically Sorted Source Nodes: [dot_], Original ATen: [aten.clone] # Source node to ATen node mapping: # dot_ => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], 
filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = (xindex // 64) x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x3), tmp0, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/rj/crjfvijs5tvs7lmrx3tnlbqpiqf6fftdmggpgew3khzaaiwhtbmp.py # Topologically Sorted Source Nodes: [xl_w_1], Original ATen: [aten.clone] # Source node to ATen node mapping: # xl_w_1 => clone_3 # Graph fragment: # %clone_3 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = (xindex // 256) x5 = (xindex // 16) % 16 x2 = 
(xindex // 16) % 4 x6 = (xindex // 4) % 16 x7 = xindex tmp0 = tl.load(in_ptr0 + (x5 + (16*x0) + (64*x4)), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x6 + (16*x0) + (64*x4)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + (x7), tmp4, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/ho/cho3ytdwjrelha3yb4kblaihfa7pdkxvm7w76zy4lipwg7jvufdx.py # Topologically Sorted Source Nodes: [add, x_l, add_2, x_l_1, x_l_2], Original ATen: [aten.add, aten.squeeze] # Source node to ATen node mapping: # add => add # add_2 => add_2 # x_l => add_1 # x_l_1 => add_3 # x_l_2 => squeeze # Graph fragment: # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %select_1), kwargs = {}) # %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %unsqueeze), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_11, %select_3), kwargs = {}) # %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %add_1), kwargs = {}) # %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.dim](args = (%add_3, 2), kwargs = {}) triton_poi_fused_add_squeeze_4 = async_compile.triton('triton_poi_fused_add_squeeze_4', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1024], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_squeeze_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_squeeze_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = (xindex // 4) x1 = (xindex // 4) % 4 x3 = (xindex // 64) x6 = xindex % 16 x7 = xindex tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x6 + (16*x3)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp5 
= tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp2 + tmp7 tl.store(out_ptr0 + (x7), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (2, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [xl_w], Original ATen: [aten.clone, aten._unsafe_view] stream0 = get_raw_stream(0) triton_poi_fused__unsafe_view_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0) buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [xl_w], Original ATen: [aten.mm] extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 1), (1, 1), 0), out=buf1) buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_], Original ATen: [aten.clone] triton_poi_fused_clone_1.run(primals_1, buf2, 1024, grid=grid(1024), stream=stream0) buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_], Original ATen: [aten.clone] triton_poi_fused_clone_2.run(buf1, buf3, 256, grid=grid(256), stream=stream0) del buf1 buf4 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [dot_], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4) buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [xl_w_1], Original ATen: [aten.clone] triton_poi_fused_clone_3.run(buf4, primals_3, primals_1, buf5, 1024, grid=grid(1024), stream=stream0) buf6 = reinterpret_tensor(buf3, (256, 1), (1, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [xl_w_1], Original ATen: [aten.mm] extern_kernels.mm(reinterpret_tensor(buf5, (256, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 1), (1, 1), 4), out=buf6) buf7 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32) # Topologically Sorted Source Nodes: [dot__1], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf6, (64, 4, 1), (4, 1, 1), 0), out=buf7) del buf6 buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [add, x_l, add_2, x_l_1, x_l_2], Original ATen: [aten.add, aten.squeeze] triton_poi_fused_add_squeeze_4.run(buf7, primals_3, buf4, primals_1, buf8, 1024, grid=grid(1024), stream=stream0) del buf4 del buf7 del primals_1 del primals_3 return (buf8, reinterpret_tensor(buf2, (64, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf5, (4, 256), (1, 4), 0), reinterpret_tensor(primals_2, (1, 4), (1, 1), 4), reinterpret_tensor(buf0, (4, 64), (1, 4), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((2, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, 
times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn from sklearn.metrics import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__unsafe_view_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 64 xnumel = 4 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (16 * x1 + 64 * (y0 // 16) + y0 % 16), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask) @triton.jit def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x2 = xindex // 64 x3 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy= 'evict_last') tl.store(out_ptr0 + x3, tmp0, xmask) @triton.jit def triton_poi_fused_clone_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = xindex // 256 x5 = xindex // 16 % 16 x2 = xindex // 16 % 4 x6 = xindex // 4 % 16 x7 = xindex tmp0 = tl.load(in_ptr0 + (x5 + 16 * x0 + 64 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x6 + 16 * x0 + 64 * x4), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tl.store(out_ptr0 + x7, tmp4, xmask) @triton.jit def triton_poi_fused_add_squeeze_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex // 4 x1 = xindex // 4 % 4 x3 = xindex // 64 x6 = xindex % 16 x7 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr3 + (x6 + 16 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 + tmp1 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp2 + tmp7 tl.store(out_ptr0 + x7, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (2, 4, 1), (4, 1, 1)) assert_size_stride(primals_3, (2, 4, 1), (4, 1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = 
        empty_strided_cuda((64, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__unsafe_view_clone_0[grid(64, 4)](primals_1, buf0,
            64, 4, XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
        buf1 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
        extern_kernels.mm(buf0, reinterpret_tensor(primals_2, (4, 1), (1,
            1), 0), out=buf1)
        buf2 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_clone_1[grid(1024)](primals_1, buf2, 1024,
            XBLOCK=128, num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((4, 4, 4, 4, 1), (64, 16, 4, 1, 1),
            torch.float32)
        triton_poi_fused_clone_2[grid(256)](buf1, buf3, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        del buf1
        buf4 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf3, (64, 4, 1), (4, 1, 0), 0), out=buf4)
        buf5 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_clone_3[grid(1024)](buf4, primals_3, primals_1,
            buf5, 1024, XBLOCK=128, num_warps=4, num_stages=1)
        buf6 = reinterpret_tensor(buf3, (256, 1), (1, 1), 0)
        del buf3
        extern_kernels.mm(reinterpret_tensor(buf5, (256, 4), (4, 1), 0),
            reinterpret_tensor(primals_2, (4, 1), (1, 1), 4), out=buf6)
        buf7 = empty_strided_cuda((64, 4, 1), (4, 1, 1), torch.float32)
        extern_kernels.bmm(reinterpret_tensor(buf2, (64, 4, 4), (16, 4, 1),
            0), reinterpret_tensor(buf6, (64, 4, 1), (4, 1, 1), 0), out=buf7)
        del buf6
        buf8 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
            torch.float32)
        triton_poi_fused_add_squeeze_4[grid(1024)](buf7, primals_3, buf4,
            primals_1, buf8, 1024, XBLOCK=256, num_warps=4, num_stages=1)
        del buf4
        del buf7
        del primals_1
        del primals_3
    return buf8, reinterpret_tensor(buf2, (64, 4, 4), (16, 1, 4), 0
        ), reinterpret_tensor(buf5, (4, 256), (1, 4), 0), reinterpret_tensor(
        primals_2, (1, 4), (1, 1), 4), reinterpret_tensor(buf0, (4, 64), (1,
        4), 0)


class CrossNetNew(nn.Module):
    """The Cross Network part of the Deep & Cross Network model, which learns
    both low- and high-degree cross features.

    Input shape
        - 2D tensor with shape: ``(batch_size, units)``.

    Output shape
        - 2D tensor with shape: ``(batch_size, units)``.

    Arguments
        - **in_features** : Positive integer, dimensionality of input features.
        - **input_feature_num** : Positive integer, shape(Input tensor)[-1]
        - **layer_num** : Positive integer, the cross layer number
        - **parameterization** : string, ``"vector"`` or ``"matrix"``, way to
          parameterize the cross network.
        - **l2_reg** : float between 0 and 1. L2 regularizer strength applied
          to the kernel weights matrix
        - **seed** : A Python integer to use as random seed.

    References
        - [Wang R, Fu B, Fu G, et al. Deep & cross network for ad click
          predictions[C]//Proceedings of the ADKDD'17. ACM, 2017:
          12.](https://arxiv.org/abs/1708.05123)
        - [Wang R, Shivanna R, Cheng D Z, et al. DCN-M: Improved Deep & Cross
          Network for Feature Cross Learning in Web-scale Learning to Rank
          Systems[J]. 2020.](https://arxiv.org/abs/2008.13535)
    """

    def __init__(self, in_features, layer_num=2, parameterization='vector',
        seed=1024, device='cpu'):
        super(CrossNetNew, self).__init__()
        self.layer_num = layer_num
        self.parameterization = parameterization
        if self.parameterization == 'vector':
            self.kernels = nn.Parameter(torch.Tensor(self.layer_num,
                in_features, 1))
        elif self.parameterization == 'matrix':
            self.kernels = nn.Parameter(torch.Tensor(self.layer_num,
                in_features, in_features))
        else:
            raise ValueError("parameterization should be 'vector' or 'matrix'")
        self.bias = nn.Parameter(torch.Tensor(self.layer_num, in_features, 1))
        for i in range(self.kernels.shape[0]):
            nn.init.xavier_normal_(self.kernels[i])
        for i in range(self.bias.shape[0]):
            nn.init.zeros_(self.bias[i])
        self.to(device)

    def forward(self, input_0):
        primals_2 = self.kernels
        primals_3 = self.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
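# The mm/bmm pipeline in call() above implements the "vector" cross-layer
# recurrence x_{l+1} = x_0 * (x_l^T w_l) + b_l + x_l. A minimal eager sketch
# (hypothetical helper, assuming a 2D input of shape (batch, d) as in the
# docstring):
def crossnet_reference(x, kernels, bias):
    x0 = x.unsqueeze(2)  # (batch, d, 1)
    xl = x0
    for w, b in zip(kernels, bias):  # one (d, 1) kernel and bias per layer
        xl_w = torch.tensordot(xl, w, dims=([1], [0]))  # (batch, 1, 1)
        xl = torch.matmul(x0, xl_w) + b + xl  # (batch, d, 1)
    return xl.squeeze(2)  # back to (batch, d)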
Fanxingye/DeepRS
CrossNet
false
14,035
[ "Apache-2.0" ]
1,770
06b98cf2cb2781656805eafc577fbd088f37d17d
https://github.com/Fanxingye/DeepRS/tree/06b98cf2cb2781656805eafc577fbd088f37d17d
Reorg
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/ci/cci6zns5syayrxwleb662v7mnw5anubbito3ni6oqyxgo26m5ydb.py # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] # Source node to ATen node mapping: # x_2 => clone_2 # Graph fragment: # %clone_2 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format}) triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex 
% 2 x3 = (xindex // 2) y0 = yindex % 4 y1 = (yindex // 4) x5 = xindex y4 = yindex tmp0 = tl.load(in_ptr0 + ((2*x2) + (4*(y0 // 2)) + (8*x3) + (64*y1) + (y0 % 2)), xmask & ymask) tl.store(out_ptr0 + (x5 + (16*y4)), tmp0, xmask & ymask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2, 2), (64, 16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.clone] stream0 = get_raw_stream(0) triton_poi_fused_clone_0.run(arg0_1, buf0, 16, 16, grid=grid(16, 16), stream=stream0) del arg0_1 return (reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl. constexpr, XBLOCK: tl.constexpr): ynumel = 16 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex % 2 x3 = xindex // 2 y0 = yindex % 4 y1 = yindex // 4 x5 = xindex y4 = yindex tmp0 = tl.load(in_ptr0 + (2 * x2 + 4 * (y0 // 2) + 8 * x3 + 64 * y1 + y0 % 2), xmask & ymask) tl.store(out_ptr0 + (x5 + 16 * y4), tmp0, xmask & ymask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 2, 2), (64, 16, 4, 2, 1), torch .float32) get_raw_stream(0) triton_poi_fused_clone_0[grid(16, 16)](arg0_1, buf0, 16, 16, XBLOCK =16, YBLOCK=16, num_warps=4, num_stages=1) del arg0_1 return reinterpret_tensor(buf0, (4, 16, 2, 2), (64, 4, 2, 1), 0), class ReorgNew(nn.Module): def __init__(self, stride=2): super(ReorgNew, self).__init__() self.stride = stride def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
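# The clone kernel above is a fused space-to-channel ("reorg") shuffle,
# (B, C, H, W) -> (B, C*s*s, H/s, W/s) with s = 2. A reshape/permute sketch
# of the same indexing (hypothetical helper, handy for spot-checking the
# compiled module on a CUDA tensor):
def reorg_reference(x, s=2):
    b, c, h, w = x.shape
    x = x.view(b, c, h // s, s, w // s, s)  # split each spatial dim by s
    x = x.permute(0, 3, 5, 1, 2, 4).contiguous()  # (b, s, s, c, h//s, w//s)
    return x.view(b, c * s * s, h // s, w // s)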
CharlesPikachu/CharlesFace
Reorg
false
7,840
[ "MIT" ]
13
90bfe38c58068228d0069dce43b55b2570acaa16
https://github.com/CharlesPikachu/CharlesFace/tree/90bfe38c58068228d0069dce43b55b2570acaa16
HardSwish
import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data import torchvision.transforms.functional as F from torch.nn import functional as F def hard_swish(x, inplace: 'bool'=False): inner = F.relu6(x + 3.0).div_(6.0) return x.mul_(inner) if inplace else x.mul(inner) class HardSwish(nn.Module): def __init__(self, inplace: 'bool'=False): super(HardSwish, self).__init__() self.inplace = inplace def forward(self, x): return hard_swish(x, self.inplace) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers import torch.nn as nn import torch.nn.functional as F import torch.utils.data import torchvision.transforms.functional as F from torch.nn import functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_div_hardtanh_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 3.0 tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp5 = 6.0 tmp6 = triton_helpers.minimum(tmp4, tmp5) tmp7 = 0.16666666666666666 tmp8 = tmp6 * tmp7 tmp9 = tmp0 * tmp8 tl.store(out_ptr0 + x0, tmp9, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_div_hardtanh_mul_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 return buf0, def hard_swish(x, inplace: 'bool'=False): inner = F.relu6(x + 3.0).div_(6.0) return x.mul_(inner) if inplace else x.mul(inner) class HardSwishNew(nn.Module): def __init__(self, inplace: 'bool'=False): super(HardSwishNew, self).__init__() self.inplace = inplace def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
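# The fused kernel above evaluates x * relu6(x + 3) / 6 in a single pass,
# which matches the standard hard-swish. A quick CUDA spot check against the
# built-in (hypothetical helper, not part of the record):
def _hardswish_check():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(HardSwishNew()(x),
        torch.nn.functional.hardswish(x))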
BigFishMaster/tnt
HardSwish
false
17,506
[ "BSD-3-Clause" ]
3
8b80bb3b194eb87ac18924428ef0924c2fb263c5
https://github.com/BigFishMaster/tnt/tree/8b80bb3b194eb87ac18924428ef0924c2fb263c5
SPPblock
import torch from torch import nn import torch.nn.functional as F import torch.nn.parallel class SPPblock(nn.Module): def __init__(self, in_channels): super(SPPblock, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=2) self.pool2 = nn.MaxPool2d(kernel_size=[3, 3], stride=3) self.pool3 = nn.MaxPool2d(kernel_size=[5, 5], stride=5) self.pool4 = nn.MaxPool2d(kernel_size=[6, 6], stride=6) self.conv = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1, padding=0) def forward(self, x): self.in_channels, h, w = x.size(1), x.size(2), x.size(3) self.layer1 = F.interpolate(self.conv(self.pool1(x)), size=(h, w), mode='bilinear') self.layer2 = F.interpolate(self.conv(self.pool2(x)), size=(h, w), mode='bilinear') self.layer3 = F.interpolate(self.conv(self.pool3(x)), size=(h, w), mode='bilinear') self.layer4 = F.interpolate(self.conv(self.pool4(x)), size=(h, w), mode='bilinear') out = torch.cat([self.layer1, self.layer2, self.layer3, self.layer4, x], 1) return out def get_inputs(): return [torch.rand([4, 4, 64, 64])] def get_init_inputs(): return [[], {'in_channels': 4}]
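# Shape arithmetic behind the pyramid above, for the 64x64 test input: the
# four max-pools produce 32x32, 21x21, 12x12 and 10x10 maps (each side is
# floor((H - k) / k) + 1), each map is reduced to one channel by the shared
# 1x1 conv and bilinearly upsampled back to HxW, so the concatenated output
# carries in_channels + 4 channels. A hypothetical eager check:
def _sppblock_shape_check():
    m = SPPblock(in_channels=4)
    y = m(torch.rand(4, 4, 64, 64))
    assert y.shape == (4, 8, 64, 64)  # 4 input channels + 4 pyramid maps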
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch import nn import torch.nn.parallel assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x0 = xindex % 32 x1 = xindex // 32 x2 = xindex tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy ='evict_last') tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tl.store(out_ptr0 + x2, tmp6, None) @triton.jit def triton_poi_fused__to_copy_1(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tl.store(out_ptr0 + x0, tmp8, xmask) @triton.jit def triton_poi_fused_add_clamp_2(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tl.full([1], 1, tl.int64) tmp10 = tmp8 + tmp9 tmp11 = tl.full([1], 31, tl.int64) tmp12 = triton_helpers.minimum(tmp10, tmp11) tl.store(out_ptr0 + x0, tmp12, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = tmp3 * tmp2 tmp5 = tmp4 - tmp2 tmp6 = 0.0 tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp7.to(tl.int32) tmp9 = tmp8.to(tl.float32) tmp10 = tmp7 - tmp9 tmp11 = triton_helpers.maximum(tmp10, tmp6) tmp12 = 1.0 tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 7056 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 21 x1 = xindex // 21 % 21 x4 = xindex // 441 x3 = xindex // 1764 x5 = xindex % 1764 tmp0 = tl.load(in_ptr0 + (3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + 
(64 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (65 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (66 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (128 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (129 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (130 + 3 * x0 + 192 * x1 + 4096 * x4), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tl.store(out_ptr0 + (x5 + 1792 * x3), tmp16, xmask) @triton.jit def triton_poi_fused__to_copy_5(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.328125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_6(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.328125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 20, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.328125 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused_max_pool2d_with_indices_8(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 2304 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 12 x1 = xindex // 12 % 12 x2 = xindex // 144 x3 = xindex tmp0 = tl.load(in_ptr0 + (5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (4 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (64 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp11 = tl.load(in_ptr0 + (65 
+ 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp13 = tl.load(in_ptr0 + (66 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp15 = tl.load(in_ptr0 + (67 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr0 + (68 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp19 = tl.load(in_ptr0 + (128 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp21 = tl.load(in_ptr0 + (129 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr0 + (130 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp25 = tl.load(in_ptr0 + (131 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp27 = tl.load(in_ptr0 + (132 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp29 = tl.load(in_ptr0 + (192 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr0 + (193 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp33 = tl.load(in_ptr0 + (194 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp35 = tl.load(in_ptr0 + (195 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp37 = tl.load(in_ptr0 + (196 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp39 = tl.load(in_ptr0 + (256 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp41 = tl.load(in_ptr0 + (257 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp43 = tl.load(in_ptr0 + (258 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp45 = tl.load(in_ptr0 + (259 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp47 = tl.load(in_ptr0 + (260 + 5 * x0 + 320 * x1 + 4096 * x2), xmask, eviction_policy='evict_last') tmp2 = triton_helpers.maximum(tmp1, tmp0) tmp4 = triton_helpers.maximum(tmp3, tmp2) tmp6 = triton_helpers.maximum(tmp5, tmp4) tmp8 = triton_helpers.maximum(tmp7, tmp6) tmp10 = triton_helpers.maximum(tmp9, tmp8) tmp12 = triton_helpers.maximum(tmp11, tmp10) tmp14 = triton_helpers.maximum(tmp13, tmp12) tmp16 = triton_helpers.maximum(tmp15, tmp14) tmp18 = triton_helpers.maximum(tmp17, tmp16) tmp20 = triton_helpers.maximum(tmp19, tmp18) tmp22 = triton_helpers.maximum(tmp21, tmp20) tmp24 = triton_helpers.maximum(tmp23, tmp22) tmp26 = triton_helpers.maximum(tmp25, tmp24) tmp28 = triton_helpers.maximum(tmp27, tmp26) tmp30 = triton_helpers.maximum(tmp29, tmp28) tmp32 = triton_helpers.maximum(tmp31, tmp30) tmp34 = triton_helpers.maximum(tmp33, tmp32) tmp36 = triton_helpers.maximum(tmp35, tmp34) tmp38 = triton_helpers.maximum(tmp37, tmp36) tmp40 = triton_helpers.maximum(tmp39, tmp38) tmp42 = triton_helpers.maximum(tmp41, tmp40) tmp44 = triton_helpers.maximum(tmp43, tmp42) tmp46 = triton_helpers.maximum(tmp45, tmp44) tmp48 = triton_helpers.maximum(tmp47, tmp46) tl.store(out_ptr0 + x3, tmp48, xmask) @triton.jit def triton_poi_fused__to_copy_9(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.1875 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_10(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.1875 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 11, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.1875 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.15625 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tl.store(out_ptr0 + x0, tmp9, xmask) @triton.jit def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.15625 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tl.full([1], 1, tl.int64) tmp11 = tmp9 + tmp10 tmp12 = tl.full([1], 9, tl.int64) tmp13 = triton_helpers.minimum(tmp11, tmp12) tl.store(out_ptr0 + x0, tmp13, xmask) @triton.jit def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = x0 tmp1 = tmp0.to(tl.float32) tmp2 = 0.5 tmp3 = tmp1 + tmp2 tmp4 = 0.15625 tmp5 = tmp3 * tmp4 tmp6 = tmp5 - tmp2 tmp7 = 0.0 tmp8 = triton_helpers.maximum(tmp6, tmp7) tmp9 = tmp8.to(tl.int32) tmp10 = tmp9.to(tl.float32) tmp11 = tmp8 - tmp10 tmp12 = triton_helpers.maximum(tmp11, tmp7) tmp13 = 1.0 tmp14 = triton_helpers.minimum(tmp12, tmp13) tl.store(out_ptr0 + x0, tmp14, xmask) @triton.jit def triton_poi_fused__unsafe_index_add_convolution_mul_sub_15(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, in_ptr24, in_ptr25, in_ptr26, in_ptr27, in_ptr28, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 64 % 64 x0 = xindex % 64 x2 = xindex // 4096 x3 = xindex tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last') tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp10 = tl.load(in_ptr3 + 0) tmp11 = 
tl.broadcast_to(tmp10, [XBLOCK]) tmp13 = tl.load(in_ptr4 + x0, None, eviction_policy='evict_last') tmp20 = tl.load(in_ptr5 + x0, None, eviction_policy='evict_last') tmp23 = tl.load(in_ptr6 + x1, None, eviction_policy='evict_last') tmp35 = tl.load(in_ptr7 + x1, None, eviction_policy='evict_last') tmp38 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last') tmp43 = tl.load(in_ptr9 + x0, None, eviction_policy='evict_last') tmp49 = tl.load(in_ptr11 + x0, None, eviction_policy='evict_last') tmp56 = tl.load(in_ptr12 + x0, None, eviction_policy='evict_last') tmp59 = tl.load(in_ptr13 + x1, None, eviction_policy='evict_last') tmp71 = tl.load(in_ptr14 + x1, None, eviction_policy='evict_last') tmp74 = tl.load(in_ptr15 + x1, None, eviction_policy='evict_last') tmp79 = tl.load(in_ptr16 + x0, None, eviction_policy='evict_last') tmp85 = tl.load(in_ptr18 + x0, None, eviction_policy='evict_last') tmp92 = tl.load(in_ptr19 + x0, None, eviction_policy='evict_last') tmp95 = tl.load(in_ptr20 + x1, None, eviction_policy='evict_last') tmp107 = tl.load(in_ptr21 + x1, None, eviction_policy='evict_last') tmp110 = tl.load(in_ptr22 + x1, None, eviction_policy='evict_last') tmp115 = tl.load(in_ptr23 + x0, None, eviction_policy='evict_last') tmp121 = tl.load(in_ptr25 + x0, None, eviction_policy='evict_last') tmp128 = tl.load(in_ptr26 + x0, None, eviction_policy='evict_last') tmp131 = tl.load(in_ptr27 + x1, None, eviction_policy='evict_last') tmp143 = tl.load(in_ptr28 + x1, None, eviction_policy='evict_last') tmp1 = tl.full([XBLOCK], 32, tl.int32) tmp2 = tmp0 + tmp1 tmp3 = tmp0 < 0 tmp4 = tl.where(tmp3, tmp2, tmp0) tmp6 = tmp5 + tmp1 tmp7 = tmp5 < 0 tmp8 = tl.where(tmp7, tmp6, tmp5) tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x2), None, eviction_policy='evict_last') tmp12 = tmp9 + tmp11 tmp14 = tmp13 + tmp1 tmp15 = tmp13 < 0 tmp16 = tl.where(tmp15, tmp14, tmp13) tmp17 = tl.load(in_ptr2 + (tmp16 + 32 * tmp4 + 1024 * x2), None, eviction_policy='evict_last') tmp18 = tmp17 + tmp11 tmp19 = tmp18 - tmp12 tmp21 = tmp19 * tmp20 tmp22 = tmp12 + tmp21 tmp24 = tmp23 + tmp1 tmp25 = tmp23 < 0 tmp26 = tl.where(tmp25, tmp24, tmp23) tmp27 = tl.load(in_ptr2 + (tmp8 + 32 * tmp26 + 1024 * x2), None, eviction_policy='evict_last') tmp28 = tmp27 + tmp11 tmp29 = tl.load(in_ptr2 + (tmp16 + 32 * tmp26 + 1024 * x2), None, eviction_policy='evict_last') tmp30 = tmp29 + tmp11 tmp31 = tmp30 - tmp28 tmp32 = tmp31 * tmp20 tmp33 = tmp28 + tmp32 tmp34 = tmp33 - tmp22 tmp36 = tmp34 * tmp35 tmp37 = tmp22 + tmp36 tmp39 = tl.full([XBLOCK], 21, tl.int32) tmp40 = tmp38 + tmp39 tmp41 = tmp38 < 0 tmp42 = tl.where(tmp41, tmp40, tmp38) tmp44 = tmp43 + tmp39 tmp45 = tmp43 < 0 tmp46 = tl.where(tmp45, tmp44, tmp43) tmp47 = tl.load(in_ptr10 + (tmp46 + 21 * tmp42 + 441 * x2), None, eviction_policy='evict_last') tmp48 = tmp47 + tmp11 tmp50 = tmp49 + tmp39 tmp51 = tmp49 < 0 tmp52 = tl.where(tmp51, tmp50, tmp49) tmp53 = tl.load(in_ptr10 + (tmp52 + 21 * tmp42 + 441 * x2), None, eviction_policy='evict_last') tmp54 = tmp53 + tmp11 tmp55 = tmp54 - tmp48 tmp57 = tmp55 * tmp56 tmp58 = tmp48 + tmp57 tmp60 = tmp59 + tmp39 tmp61 = tmp59 < 0 tmp62 = tl.where(tmp61, tmp60, tmp59) tmp63 = tl.load(in_ptr10 + (tmp46 + 21 * tmp62 + 441 * x2), None, eviction_policy='evict_last') tmp64 = tmp63 + tmp11 tmp65 = tl.load(in_ptr10 + (tmp52 + 21 * tmp62 + 441 * x2), None, eviction_policy='evict_last') tmp66 = tmp65 + tmp11 tmp67 = tmp66 - tmp64 tmp68 = tmp67 * tmp56 tmp69 = tmp64 + tmp68 tmp70 = tmp69 - tmp58 tmp72 = tmp70 * tmp71 tmp73 = tmp58 + tmp72 tmp75 = tl.full([XBLOCK], 12, 
tl.int32) tmp76 = tmp74 + tmp75 tmp77 = tmp74 < 0 tmp78 = tl.where(tmp77, tmp76, tmp74) tmp80 = tmp79 + tmp75 tmp81 = tmp79 < 0 tmp82 = tl.where(tmp81, tmp80, tmp79) tmp83 = tl.load(in_ptr17 + (tmp82 + 12 * tmp78 + 144 * x2), None, eviction_policy='evict_last') tmp84 = tmp83 + tmp11 tmp86 = tmp85 + tmp75 tmp87 = tmp85 < 0 tmp88 = tl.where(tmp87, tmp86, tmp85) tmp89 = tl.load(in_ptr17 + (tmp88 + 12 * tmp78 + 144 * x2), None, eviction_policy='evict_last') tmp90 = tmp89 + tmp11 tmp91 = tmp90 - tmp84 tmp93 = tmp91 * tmp92 tmp94 = tmp84 + tmp93 tmp96 = tmp95 + tmp75 tmp97 = tmp95 < 0 tmp98 = tl.where(tmp97, tmp96, tmp95) tmp99 = tl.load(in_ptr17 + (tmp82 + 12 * tmp98 + 144 * x2), None, eviction_policy='evict_last') tmp100 = tmp99 + tmp11 tmp101 = tl.load(in_ptr17 + (tmp88 + 12 * tmp98 + 144 * x2), None, eviction_policy='evict_last') tmp102 = tmp101 + tmp11 tmp103 = tmp102 - tmp100 tmp104 = tmp103 * tmp92 tmp105 = tmp100 + tmp104 tmp106 = tmp105 - tmp94 tmp108 = tmp106 * tmp107 tmp109 = tmp94 + tmp108 tmp111 = tl.full([XBLOCK], 10, tl.int32) tmp112 = tmp110 + tmp111 tmp113 = tmp110 < 0 tmp114 = tl.where(tmp113, tmp112, tmp110) tmp116 = tmp115 + tmp111 tmp117 = tmp115 < 0 tmp118 = tl.where(tmp117, tmp116, tmp115) tmp119 = tl.load(in_ptr24 + (tmp118 + 10 * tmp114 + 100 * x2), None, eviction_policy='evict_last') tmp120 = tmp119 + tmp11 tmp122 = tmp121 + tmp111 tmp123 = tmp121 < 0 tmp124 = tl.where(tmp123, tmp122, tmp121) tmp125 = tl.load(in_ptr24 + (tmp124 + 10 * tmp114 + 100 * x2), None, eviction_policy='evict_last') tmp126 = tmp125 + tmp11 tmp127 = tmp126 - tmp120 tmp129 = tmp127 * tmp128 tmp130 = tmp120 + tmp129 tmp132 = tmp131 + tmp111 tmp133 = tmp131 < 0 tmp134 = tl.where(tmp133, tmp132, tmp131) tmp135 = tl.load(in_ptr24 + (tmp118 + 10 * tmp134 + 100 * x2), None, eviction_policy='evict_last') tmp136 = tmp135 + tmp11 tmp137 = tl.load(in_ptr24 + (tmp124 + 10 * tmp134 + 100 * x2), None, eviction_policy='evict_last') tmp138 = tmp137 + tmp11 tmp139 = tmp138 - tmp136 tmp140 = tmp139 * tmp128 tmp141 = tmp136 + tmp140 tmp142 = tmp141 - tmp130 tmp144 = tmp142 * tmp143 tmp145 = tmp130 + tmp144 tl.store(in_out_ptr0 + x3, tmp37, None) tl.store(in_out_ptr1 + x3, tmp73, None) tl.store(in_out_ptr2 + x3, tmp109, None) tl.store(in_out_ptr3 + x3, tmp145, None) @triton.jit def triton_poi_fused_cat_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x1 = xindex // 4096 % 8 x0 = xindex % 4096 x2 = xindex // 32768 x3 = xindex tmp0 = x1 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x2), tmp4, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 2, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tmp6 & tmp8 tmp10 = tl.load(in_ptr1 + (x0 + 4096 * x2), tmp9, eviction_policy= 'evict_last', other=0.0) tmp11 = tmp0 >= tmp7 tmp12 = tl.full([1], 3, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tmp11 & tmp13 tmp15 = tl.load(in_ptr2 + (x0 + 4096 * x2), tmp14, eviction_policy= 'evict_last', other=0.0) tmp16 = tmp0 >= tmp12 tmp17 = tl.full([1], 4, tl.int64) tmp18 = tmp0 < tmp17 tmp19 = tmp16 & tmp18 tmp20 = tl.load(in_ptr3 + (x0 + 4096 * x2), tmp19, eviction_policy= 'evict_last', other=0.0) tmp21 = tmp0 >= tmp17 tl.full([1], 8, tl.int64) tmp24 = tl.load(in_ptr4 + (x0 + 4096 * (-4 + x1) + 16384 * x2), tmp21, other=0.0) tmp25 = tl.where(tmp19, tmp20, tmp24) tmp26 = tl.where(tmp14, tmp15, tmp25) tmp27 = 
tl.where(tmp9, tmp10, tmp26) tmp28 = tl.where(tmp4, tmp5, tmp27) tl.store(out_ptr0 + x3, tmp28, None) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 64, 64), (16384, 4096, 64, 1)) assert_size_stride(primals_2, (1, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_3, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 32, 32), (4096, 1024, 32, 1), torch.float32) get_raw_stream(0) triton_poi_fused_max_pool2d_with_indices_0[grid(16384)](primals_1, buf0, 16384, XBLOCK=128, num_warps=4, num_stages=1) buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (4, 1, 32, 32), (1024, 1024, 32, 1)) buf2 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_1[grid(64)](buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_2[grid(64)](buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) buf4 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_1[grid(64)](buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) buf5 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_2[grid(64)](buf5, 64, XBLOCK=64, num_warps=1, num_stages=1) buf6 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(64)](buf6, 64, XBLOCK=64, num_warps=1, num_stages=1) buf11 = empty_strided_cuda((4, 4, 21, 21), (1792, 441, 21, 1), torch.float32) triton_poi_fused_max_pool2d_with_indices_4[grid(7056)](primals_1, buf11, 7056, XBLOCK=256, num_warps=4, num_stages=1) buf12 = extern_kernels.convolution(buf11, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf12, (4, 1, 21, 21), (441, 441, 21, 1)) buf13 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_5[grid(64)](buf13, 64, XBLOCK=64, num_warps=1, num_stages=1) buf14 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_6[grid(64)](buf14, 64, XBLOCK=64, num_warps=1, num_stages=1) buf15 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_5[grid(64)](buf15, 64, XBLOCK=64, num_warps=1, num_stages=1) buf16 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_6[grid(64)](buf16, 64, XBLOCK=64, num_warps=1, num_stages=1) buf17 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7[grid(64)](buf17, 64, XBLOCK=64, num_warps=1, num_stages=1) buf19 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_7[grid(64)](buf19, 64, XBLOCK=64, num_warps=1, num_stages=1) buf22 = empty_strided_cuda((4, 4, 12, 12), (576, 144, 12, 1), torch .float32) triton_poi_fused_max_pool2d_with_indices_8[grid(2304)](primals_1, buf22, 2304, XBLOCK=128, num_warps=4, num_stages=1) buf23 = extern_kernels.convolution(buf22, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf23, (4, 1, 12, 12), (144, 144, 12, 1)) buf24 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_9[grid(64)](buf24, 64, XBLOCK=64, num_warps=1, num_stages=1) buf25 = empty_strided_cuda((64, 1), (1, 1), torch.int64) 
triton_poi_fused_add_clamp_10[grid(64)](buf25, 64, XBLOCK=64, num_warps=1, num_stages=1) buf26 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_9[grid(64)](buf26, 64, XBLOCK=64, num_warps=1, num_stages=1) buf27 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_10[grid(64)](buf27, 64, XBLOCK=64, num_warps=1, num_stages=1) buf28 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11[grid(64)](buf28, 64, XBLOCK=64, num_warps=1, num_stages=1) buf30 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_11[grid(64)](buf30, 64, XBLOCK=64, num_warps=1, num_stages=1) buf33 = torch.ops.aten.max_pool2d_with_indices.default(primals_1, [ 6, 6], [6, 6]) buf34 = buf33[0] del buf33 buf36 = extern_kernels.convolution(buf34, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf36, (4, 1, 10, 10), (100, 100, 10, 1)) buf37 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused__to_copy_12[grid(64)](buf37, 64, XBLOCK=64, num_warps=1, num_stages=1) buf38 = empty_strided_cuda((64, 1), (1, 1), torch.int64) triton_poi_fused_add_clamp_13[grid(64)](buf38, 64, XBLOCK=64, num_warps=1, num_stages=1) buf39 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused__to_copy_12[grid(64)](buf39, 64, XBLOCK=64, num_warps=1, num_stages=1) buf40 = empty_strided_cuda((64,), (1,), torch.int64) triton_poi_fused_add_clamp_13[grid(64)](buf40, 64, XBLOCK=64, num_warps=1, num_stages=1) buf41 = empty_strided_cuda((64,), (1,), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(64)](buf41, 64, XBLOCK=64, num_warps=1, num_stages=1) buf43 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(64)](buf43, 64, XBLOCK=64, num_warps=1, num_stages=1) buf8 = empty_strided_cuda((64, 1), (1, 1), torch.float32) triton_poi_fused__to_copy_add_arange_clamp_mul_sub_3[grid(64)](buf8, 64, XBLOCK=64, num_warps=1, num_stages=1) buf9 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) buf10 = reinterpret_tensor(buf9, (4, 1, 64, 64), (4096, 4096, 64, 1), 0 ) del buf9 buf20 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) buf21 = reinterpret_tensor(buf20, (4, 1, 64, 64), (4096, 4096, 64, 1), 0) del buf20 buf31 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) buf32 = reinterpret_tensor(buf31, (4, 1, 64, 64), (4096, 4096, 64, 1), 0) del buf31 buf44 = empty_strided_cuda((4, 1, 64, 64), (4096, 16384, 64, 1), torch.float32) buf45 = reinterpret_tensor(buf44, (4, 1, 64, 64), (4096, 4096, 64, 1), 0) del buf44 triton_poi_fused__unsafe_index_add_convolution_mul_sub_15[grid(16384)]( buf10, buf21, buf32, buf45, buf2, buf4, buf1, primals_3, buf5, buf6, buf3, buf8, buf13, buf15, buf12, buf16, buf17, buf14, buf19, buf24, buf26, buf23, buf27, buf28, buf25, buf30, buf37, buf39, buf36, buf40, buf41, buf38, buf43, 16384, XBLOCK=256, num_warps=4, num_stages=1) del buf1 del buf12 del buf23 del buf36 del primals_3 buf46 = empty_strided_cuda((4, 8, 64, 64), (32768, 4096, 64, 1), torch.float32) triton_poi_fused_cat_16[grid(131072)](buf10, buf21, buf32, buf45, primals_1, buf46, 131072, XBLOCK=512, num_warps=8, num_stages=1) del primals_1 return (buf46, buf45, buf32, buf21, buf10, primals_2, buf0, buf2, buf3, buf4, buf5, buf6, buf8, buf11, buf13, buf14, buf15, buf16, buf17, buf19, 
buf22, buf24, buf25, buf26, buf27, buf28, buf30, buf34, buf37, buf38, buf39, buf40, buf41, buf43) class SPPblockNew(nn.Module): def __init__(self, in_channels): super(SPPblockNew, self).__init__() self.pool1 = nn.MaxPool2d(kernel_size=[2, 2], stride=2) self.pool2 = nn.MaxPool2d(kernel_size=[3, 3], stride=3) self.pool3 = nn.MaxPool2d(kernel_size=[5, 5], stride=5) self.pool4 = nn.MaxPool2d(kernel_size=[6, 6], stride=6) self.conv = nn.Conv2d(in_channels=in_channels, out_channels=1, kernel_size=1, padding=0) def forward(self, input_0): primals_2 = self.conv.weight primals_3 = self.conv.bias primals_1 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
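Note: the fused listing above is hard to read back into the module's intent. Below is a minimal eager-mode sketch of the same data flow, assuming bilinear upsampling with align_corners=False (consistent with the (x + 0.5) * scale - 0.5 index arithmetic in the _to_copy kernels) and a single 1x1 conv shared by all four pooling branches; the class and variable names are illustrative only, not from the repository.

import torch
import torch.nn.functional as F
from torch import nn


class SPPBlockReference(nn.Module):
    """Eager-mode sketch of SPPblock's data flow (assumptions noted above)."""

    def __init__(self, in_channels):
        super().__init__()
        # Four max-pool branches with kernel == stride == 2, 3, 5, 6.
        self.pools = nn.ModuleList([nn.MaxPool2d(kernel_size=k, stride=k)
                                    for k in (2, 3, 5, 6)])
        # One shared 1x1 conv squeezes each branch to a single channel.
        self.conv = nn.Conv2d(in_channels, 1, kernel_size=1)

    def forward(self, x):
        h, w = x.shape[-2:]
        branches = [F.interpolate(self.conv(pool(x)), size=(h, w),
                                  mode='bilinear', align_corners=False)
                    for pool in self.pools]
        # Concatenate the four 1-channel maps in front of the input, giving
        # in_channels + 4 output channels, e.g. (4, 8, 64, 64) here.
        return torch.cat(branches + [x], dim=1)


x = torch.randn(4, 4, 64, 64)
assert SPPBlockReference(4)(x).shape == (4, 8, 64, 64)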
Minerva-J/Pytorch-Segmentation-multi-models
SPPblock
false
14,071
[ "Apache-2.0" ]
84
0845b54d4fbc8d38c70f158054b7ab1be2b3ceb9
https://github.com/Minerva-J/Pytorch-Segmentation-multi-models/tree/0845b54d4fbc8d38c70f158054b7ab1be2b3ceb9
PixelNorm
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/ln/clnknwdlswmj3l56wk4ertm47scosxi4dtpndhkl7cvrs2tmbcj5.py # Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, mul], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul] # Source node to ATen node mapping: # add => add # mean => mean # mul => mul # pow_1 => pow_1 # rsqrt => rsqrt # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, 1e-08), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %rsqrt), kwargs = {}) triton_poi_fused_add_mean_mul_pow_rsqrt_0 = async_compile.triton('triton_poi_fused_add_mean_mul_pow_rsqrt_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_pow_rsqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': 
True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-08 tmp15 = tmp13 + tmp14 tmp16 = libdevice.rsqrt(tmp15) tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + (x3), tmp17, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, mean, add, rsqrt, mul], Original ATen: [aten.pow, aten.mean, aten.add, aten.rsqrt, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_mean_mul_pow_rsqrt_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_add_mean_mul_pow_rsqrt_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = xindex // 64 tmp0 = tl.load(in_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy= 'evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = 4.0 tmp13 = tmp11 / tmp12 tmp14 = 1e-08 tmp15 = tmp13 + tmp14 tmp16 = libdevice.rsqrt(tmp15) tmp17 = tmp0 * tmp16 tl.store(out_ptr0 + x3, tmp17, xmask) def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_mean_mul_pow_rsqrt_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1) del arg0_1 return buf0, class PixelNormNew(nn.Module): def __init__(self): super().__init__() def forward(self, input_0): arg0_1 = input_0 output = call([arg0_1]) return output[0]
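The ATen graph comment in the original listing spells the math out (pow, mean over dim 1, add 1e-08, rsqrt, mul). A one-function eager equivalent for cross-checking the kernel; the helper name is illustrative, not from the repository.

import torch


def pixel_norm_reference(x: torch.Tensor, eps: float = 1e-08) -> torch.Tensor:
    # Normalize each spatial position by the RMS over the channel dimension,
    # which is what triton_poi_fused_add_mean_mul_pow_rsqrt_0 computes per element.
    return x * torch.rsqrt(x.pow(2).mean(dim=1, keepdim=True) + eps)


x = torch.randn(4, 4, 4, 4)
assert pixel_norm_reference(x).shape == x.shape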
Dolorousrtur/style-people
PixelNorm
false
8,003
[ "MIT" ]
15
c48b12b245cc50f8230c0654dffe40016f2a69f1
https://github.com/Dolorousrtur/style-people/tree/c48b12b245cc50f8230c0654dffe40016f2a69f1
SceneParserHead
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/sg/csgdg3aydvavjgap6qhul7z76ytwauxsmjtmjmpyykxu7qjn2g4l.py # Topologically Sorted Source Nodes: [x], Original ATen: [aten.unsqueeze] # Source node to ATen node mapping: # x => unsqueeze # Graph fragment: # %unsqueeze : [num_users=2] = call_function[target=torch.ops.aten.unsqueeze.default](args = (%primals_3, 0), kwargs = {}) triton_poi_fused_unsqueeze_0 = async_compile.triton('triton_poi_fused_unsqueeze_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4, 16], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_unsqueeze_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_unsqueeze_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = 
tl.load(in_ptr0 + (x1 + (16*y0)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (4*x1)), tmp0, xmask & ymask) ''', device_str='cuda') # kernel path: runs/run_shard_8/inductor_cache/ib/cibhtju55xp4knpcrfnffs5cery4dvtpmr4b6n5a4xieye3owbpr.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mean] # Source node to ATen node mapping: # x_1 => mean # Graph fragment: # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%squeeze, [-1, -2], True), kwargs = {}) triton_per_fused_mean_1 = async_compile.triton('triton_per_fused_mean_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[2048, 16], reduction_hint=ReductionHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 2048 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (2048*r1)), None) tmp1 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = 16.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp7, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2048, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (2048, ), (1, )) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 2048), (2048, 1)) assert_size_stride(primals_5, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 1, 16, 4), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.unsqueeze] stream0 = get_raw_stream(0) triton_poi_fused_unsqueeze_0.run(primals_3, buf0, 4, 16, grid=grid(4, 16), stream=stream0) del primals_3 # Topologically Sorted 
Source Nodes: [x], Original ATen: [aten.convolution] buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (1, 2048, 4, 4), (32768, 1, 8192, 2048)) buf2 = empty_strided_cuda((2048, 1, 1), (1, 2048, 2048), torch.float32) buf3 = reinterpret_tensor(buf2, (2048, 1, 1), (1, 1, 1), 0); del buf2 # reuse # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mean] triton_per_fused_mean_1.run(buf3, buf1, primals_2, 2048, 16, grid=grid(2048), stream=stream0) del buf1 del primals_2 buf4 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (1, 2048), (0, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf4) del primals_5 return (reinterpret_tensor(buf4, (4, ), (1, ), 0), primals_1, buf0, reinterpret_tensor(buf3, (1, 2048), (2048, 1), 0), primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((2048, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_unsqueeze_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr): ynumel = 4 xnumel = 16 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x1 = xindex y0 = yindex tmp0 = tl.load(in_ptr0 + (x1 + 16 * y0), xmask & ymask, eviction_policy ='evict_last') tl.store(out_ptr0 + (y0 + 4 * x1), tmp0, xmask & ymask) @triton.jit def triton_per_fused_mean_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 2048 * r1), None) tmp1 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK]) tmp5 = tl.sum(tmp3, 1)[:, None] tmp6 = 16.0 tmp7 = tmp5 / tmp6 tl.debug_barrier() tl.store(in_out_ptr0 + x0, tmp7, None) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (2048, 4, 1, 1), (4, 1, 1, 1)) assert_size_stride(primals_2, (2048,), (1,)) assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_4, (4, 2048), (2048, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((1, 4, 4, 4), (64, 1, 16, 4), torch.float32) get_raw_stream(0) triton_poi_fused_unsqueeze_0[grid(4, 16)](primals_3, buf0, 4, 16, XBLOCK=16, YBLOCK=4, num_warps=1, num_stages=1) del primals_3 buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf1, (1, 2048, 4, 4), (32768, 1, 8192, 2048)) buf2 = empty_strided_cuda((2048, 1, 1), (1, 2048, 2048), torch.float32) buf3 = reinterpret_tensor(buf2, (2048, 1, 1), (1, 1, 1), 0) del buf2 triton_per_fused_mean_1[grid(2048)](buf3, buf1, primals_2, 2048, 16, XBLOCK=32, num_warps=4, num_stages=1) del buf1 del primals_2 buf4 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf3, (1, 2048), (0, 1), 0), reinterpret_tensor(primals_4, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf4) del primals_5 return reinterpret_tensor(buf4, (4,), (1,), 0 ), primals_1, buf0, reinterpret_tensor(buf3, (1, 2048), (2048, 1), 0 ), primals_4 class SceneParserHeadNew(nn.Module): def __init__(self, in_channels, num_classes): super(SceneParserHeadNew, self).__init__() self.conv1x1 = nn.Conv2d(in_channels, 2048, 1, 1) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(2048, num_classes) def forward(self, input_0): primals_1 = self.conv1x1.weight primals_2 = self.conv1x1.bias primals_4 = self.fc.weight primals_5 = 
self.fc.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5]) return output[0]
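Read back into eager PyTorch, the call() above is a 1x1 convolution to 2048 channels, global average pooling, and a linear classifier, applied to a single unbatched (C, H, W) input. A sketch under that reading; the class name is illustrative.

import torch
from torch import nn


class SceneParserHeadReference(nn.Module):
    """Eager sketch of the head; the compiled graph unsqueezes a (C, H, W)
    input to a batch of one, so this sketch assumes a 3-D input as well."""

    def __init__(self, in_channels, num_classes):
        super().__init__()
        self.conv1x1 = nn.Conv2d(in_channels, 2048, kernel_size=1)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(2048, num_classes)

    def forward(self, x):
        x = self.conv1x1(x.unsqueeze(0))    # (1, 2048, H, W)
        x = self.avgpool(x).flatten(1)      # (1, 2048), the fused mean kernel
        return self.fc(x).squeeze(0)        # (num_classes,)


head = SceneParserHeadReference(in_channels=4, num_classes=4)
assert head(torch.randn(4, 4, 4)).shape == (4,)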
hangwudy/pytorch_tutorial
SceneParserHead
false
10,184
[ "MIT" ]
0
857b128253bd1e2bd30cb85e995c757e5acbb3a2
https://github.com/hangwudy/pytorch_tutorial/tree/857b128253bd1e2bd30cb85e995c757e5acbb3a2
BiaffineAttention
import torch
import torch.utils.checkpoint
import torch.utils.data


class BiaffineAttention(torch.nn.Module):
    """Implements a biaffine attention operator for binary relation
    classification.

    PyTorch implementation of the biaffine attention operator from
    "End-to-end neural relation extraction using deep biaffine attention"
    (https://arxiv.org/abs/1812.11275) which can be used as a classifier for
    binary relation classification.

    Args:
        in_features (int): The size of the feature dimension of the inputs.
        out_features (int): The size of the feature dimension of the output.

    Shape:
        - x_1: `(N, *, in_features)` where `N` is the batch dimension and
          `*` means any number of additional dimensions.
        - x_2: `(N, *, in_features)`, where `N` is the batch dimension and
          `*` means any number of additional dimensions.
        - Output: `(N, *, out_features)`, where `N` is the batch dimension and
          `*` means any number of additional dimensions.

    Examples:
        >>> batch_size, in_features, out_features = 32, 100, 4
        >>> biaffine_attention = BiaffineAttention(in_features, out_features)
        >>> x_1 = torch.randn(batch_size, in_features)
        >>> x_2 = torch.randn(batch_size, in_features)
        >>> output = biaffine_attention(x_1, x_2)
        >>> print(output.size())
        torch.Size([32, 4])
    """

    def __init__(self, in_features, out_features):
        super(BiaffineAttention, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.bilinear = torch.nn.Bilinear(in_features, in_features,
            out_features, bias=False)
        self.linear = torch.nn.Linear(2 * in_features, out_features, bias=True)
        self.reset_parameters()

    def forward(self, x_1, x_2):
        return self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2),
            dim=-1))

    def reset_parameters(self):
        self.bilinear.reset_parameters()
        self.linear.reset_parameters()


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_features': 4, 'out_features': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.checkpoint import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 512 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 8 x1 = xindex // 8 x2 = xindex tmp0 = x0 tl.full([1], 0, tl.int64) tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy= 'evict_last', other=0.0) tmp6 = tmp0 >= tmp3 tl.full([1], 8, tl.int64) tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp10 = tl.where(tmp4, tmp5, tmp9) tl.store(out_ptr0 + x2, tmp10, xmask) @triton.jit def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x0 = xindex % 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x2, xmask) tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp4 = tmp0 + tmp3 tl.store(in_out_ptr0 + x2, tmp4, xmask) def call(args): primals_1, primals_2, primals_3, primals_4, primals_5 = args args.clear() assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 8), (8, 1)) assert_size_stride(primals_5, (4,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = torch.ops.aten._trilinear.default(reinterpret_tensor( primals_3, (64, 4), (4, 1), 0), primals_1, reinterpret_tensor( primals_2, (64, 4), (4, 1), 0), [1, 3], [0], [1, 2], [2, 3]) del primals_1 buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4, 4, 8), (128, 32, 8, 1), torch.float32) get_raw_stream(0) triton_poi_fused_cat_0[grid(512)](primals_3, primals_2, buf2, 512, XBLOCK=256, num_warps=4, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(buf2, (64, 8), (8, 1), 0), reinterpret_tensor(primals_4, (8, 4), (1, 8), 0), out=buf3) del primals_4 buf4 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf1 triton_poi_fused_add_1[grid(256)](buf4, buf3, primals_5, 256, XBLOCK=256, num_warps=4, num_stages=1) del buf3 del primals_5 return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0 ), reinterpret_tensor(buf2, (64, 8), (8, 1), 0) class BiaffineAttentionNew(torch.nn.Module): """Implements a biaffine attention operator for binary relation classification. PyTorch implementation of the biaffine attention operator from "End-to-end neural relation extraction using deep biaffine attention" (https://arxiv.org/abs/1812.11275) which can be used as a classifier for binary relation classification. Args: in_features (int): The size of the feature dimension of the inputs. out_features (int): The size of the feature dimension of the output. 
    Shape:
        - x_1: `(N, *, in_features)` where `N` is the batch dimension and
          `*` means any number of additional dimensions.
        - x_2: `(N, *, in_features)`, where `N` is the batch dimension and
          `*` means any number of additional dimensions.
        - Output: `(N, *, out_features)`, where `N` is the batch dimension and
          `*` means any number of additional dimensions.

    Examples:
        >>> batch_size, in_features, out_features = 32, 100, 4
        >>> biaffine_attention = BiaffineAttention(in_features, out_features)
        >>> x_1 = torch.randn(batch_size, in_features)
        >>> x_2 = torch.randn(batch_size, in_features)
        >>> output = biaffine_attention(x_1, x_2)
        >>> print(output.size())
        torch.Size([32, 4])
    """

    def __init__(self, in_features, out_features):
        super(BiaffineAttentionNew, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.bilinear = torch.nn.Bilinear(in_features, in_features,
            out_features, bias=False)
        self.linear = torch.nn.Linear(2 * in_features, out_features, bias=True)
        self.reset_parameters()

    def reset_parameters(self):
        self.bilinear.reset_parameters()
        self.linear.reset_parameters()

    def forward(self, input_0, input_1):
        primals_1 = self.bilinear.weight
        primals_4 = self.linear.weight
        primals_5 = self.linear.bias
        primals_2 = input_0
        primals_3 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0]
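A small numerical cross-check of the biaffine map that both listings implement, out = x1^T U x2 + W [x1; x2] + b; the einsum restatement below is mine, not code from the repository.

import torch

in_features, out_features = 4, 4
bilinear = torch.nn.Bilinear(in_features, in_features, out_features, bias=False)
linear = torch.nn.Linear(2 * in_features, out_features, bias=True)

x1 = torch.randn(8, in_features)
x2 = torch.randn(8, in_features)

# Module form, as in BiaffineAttention.forward.
module_out = bilinear(x1, x2) + linear(torch.cat((x1, x2), dim=-1))

# Explicit tensor form: out_k = x1_i U_{kij} x2_j + W_k . [x1; x2] + b_k.
einsum_out = (torch.einsum('bi,kij,bj->bk', x1, bilinear.weight, x2)
              + torch.cat((x1, x2), dim=-1) @ linear.weight.t()
              + linear.bias)
assert torch.allclose(module_out, einsum_out, atol=1e-05)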
rushabh-v/unilm
BiaffineAttention
false
7,605
[ "MIT" ]
1
a62a023bd5d3500c23ac454be0a8b0107e18a6ce
https://github.com/rushabh-v/unilm/tree/a62a023bd5d3500c23ac454be0a8b0107e18a6ce
CAM_Module
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/3m/c3mxgkf4weymbmbgydi4j4i6eycdz2flzbf3jce3eapte2aqyfta.py # Topologically Sorted Source Nodes: [energy_new], Original ATen: [aten.sub] # Source node to ATen node mapping: # energy_new => sub # Graph fragment: # %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%expand, %bmm), kwargs = {}) triton_poi_fused_sub_0 = async_compile.triton('triton_poi_fused_sub_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 
= tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + (x2), xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = tmp6 - tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => amax, exp, sub_1 # Graph fragment: # %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%sub, [-1], True), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %amax), kwargs = {}) # %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {}) triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + (x2), tmp9, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] # Source node to ATen node mapping: # attention => div, sum_1 # Graph fragment: # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = 
(%exp, [-1], True), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {}) triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/j4/cj4f6qdb45emg4zrdv5vzxtw2vswpyt2rqyalr6mxgomzeyk55j5.py # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] # Source node to ATen node mapping: # mul => mul # out_2 => add # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %view_3), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_1), kwargs = {}) triton_poi_fused_add_mul_3 = async_compile.triton('triton_poi_fused_add_mul_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, 
max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (0)) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + (x0), xmask) tmp4 = tl.load(in_ptr2 + (x0), xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + (x0), tmp5, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy], Original ATen: [aten.bmm] extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [energy_new], Original ATen: [aten.sub] stream0 = get_raw_stream(0) triton_poi_fused_sub_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0) buf3 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [attention], Original ATen: [aten._softmax] triton_poi_fused__softmax_2.run(buf2, buf3, 64, grid=grid(64), stream=stream0) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) # Topologically Sorted Source Nodes: [attention, out], Original ATen: [aten._softmax, aten.bmm] extern_kernels.bmm(buf3, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf4) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [mul, out_2], Original ATen: [aten.mul, aten.add] triton_poi_fused_add_mul_3.run(primals_2, buf4, primals_1, buf5, 256, grid=grid(256), stream=stream0) del primals_1 del primals_2 return (buf5, buf4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math from torch.nn import Module import torch.utils.data import torch from torch.nn import Parameter from torch.nn import Softmax assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = xindex // 4 x2 = xindex tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr0 + x2, xmask) tmp2 = triton_helpers.maximum(tmp0, tmp1) tmp4 = triton_helpers.maximum(tmp2, tmp3) tmp6 = triton_helpers.maximum(tmp4, tmp5) tmp8 = tmp6 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tmp9 = tl_math.exp(tmp8) tl.store(out_ptr0 + x2, tmp9, xmask) @triton.jit def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr ): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = tmp1 + tmp2 tmp5 = tmp3 + tmp4 tmp7 = tmp5 + tmp6 tmp8 = tmp0 / tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused_add_mul_3(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + 0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK]) tmp2 = tl.load(in_ptr1 + x0, xmask) tmp4 = tl.load(in_ptr2 + x0, xmask) tmp3 = tmp1 * tmp2 tmp5 = tmp3 + tmp4 tl.store(out_ptr0 + x0, tmp5, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): 
torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) extern_kernels.bmm(reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), reinterpret_tensor(primals_1, (4, 16, 4), (64, 1, 16), 0), out=buf0) buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_sub_0[grid(64)](buf0, buf1, 64, XBLOCK=64, num_warps=1, num_stages=1) buf2 = buf0 del buf0 triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64, num_warps=1, num_stages=1) buf3 = buf1 del buf1 triton_poi_fused__softmax_2[grid(64)](buf2, buf3, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf2 buf4 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32) extern_kernels.bmm(buf3, reinterpret_tensor(primals_1, (4, 4, 16), (64, 16, 1), 0), out=buf4) del buf3 buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_mul_3[grid(256)](primals_2, buf4, primals_1, buf5, 256, XBLOCK=128, num_warps=4, num_stages=1) del primals_1 del primals_2 return buf5, buf4 class CAM_ModuleNew(Module): """ Channel attention module""" def __init__(self, in_dim): super(CAM_ModuleNew, self).__init__() self.chanel_in = in_dim self.gamma = Parameter(torch.zeros(1)) self.softmax = Softmax(dim=-1) def forward(self, input_0): primals_2 = self.gamma primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
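For reference, the kernels above fuse the standard channel-attention forward pass: energy = bmm(x_flat, x_flat^T), energy_new = rowmax(energy) - energy, softmax over the last dim, then out = gamma * bmm(attention, x_flat) + x. A minimal eager-mode sketch reconstructed from the ATen graph (the helper name cam_reference is hypothetical, not part of the dump); it should agree with CAM_ModuleNew up to float tolerance on the guarded (4, 4, 4, 4) input:

import torch

def cam_reference(x, gamma):
    # x: (B, C, H, W); gamma: learned scalar, zero-initialized in the module above
    b, c, h, w = x.shape
    flat = x.view(b, c, -1)                                  # (B, C, H*W)
    energy = torch.bmm(flat, flat.permute(0, 2, 1))          # (B, C, C)
    energy_new = energy.max(-1, keepdim=True)[0].expand_as(energy) - energy
    attention = torch.softmax(energy_new, dim=-1)
    out = torch.bmm(attention, flat).view(b, c, h, w)
    return gamma * out + x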
mlcb-jlu/wsMedSeg
CAM_Module
false
4,021
[ "MIT" ]
0
63bd1fd28583f11444f292f4b961870ea1b12635
https://github.com/mlcb-jlu/wsMedSeg/tree/63bd1fd28583f11444f292f4b961870ea1b12635
InnerProductLayer
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_9/inductor_cache/cw/ccwvutm6zl37xd5gsiqvlmjxykaqval6ja4w7xuw5xctwfn5antt.py # Topologically Sorted Source Nodes: [p, q, inner_product, sum_1], Original ATen: [aten.index, aten.mul, aten.sum] # Source node to ATen node mapping: # inner_product => mul # p => index # q => index_1 # sum_1 => sum_1 # Graph fragment: # %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy]), kwargs = {}) # %index_1 : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, %lift_fresh_copy_1]), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%index, %index_1), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {}) triton_poi_fused_index_mul_sum_0 = async_compile.triton('triton_poi_fused_index_mul_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[128], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 
'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 96 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) % 6 x0 = xindex % 4 x2 = (xindex // 24) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 3, tl.int64) tmp2 = tmp0 < tmp1 tmp3 = tl.full([1], 1, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.full([1], 2, tl.int64) tmp6 = tmp0 < tmp5 tmp7 = tl.full([1], 0, tl.int64) tmp8 = tl.where(tmp6, tmp7, tmp7) tmp9 = tl.where(tmp4, tmp7, tmp8) tmp10 = tl.full([1], 4, tl.int64) tmp11 = tmp0 < tmp10 tmp12 = tl.full([1], 5, tl.int64) tmp13 = tmp0 < tmp12 tmp14 = tl.where(tmp13, tmp3, tmp5) tmp15 = tl.where(tmp11, tmp3, tmp14) tmp16 = tl.where(tmp2, tmp9, tmp15) tmp17 = tl.load(in_ptr0 + ((4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp18 = tl.where(tmp6, tmp5, tmp1) tmp19 = tl.where(tmp4, tmp3, tmp18) tmp20 = tl.where(tmp13, tmp1, tmp1) tmp21 = tl.where(tmp11, tmp5, tmp20) tmp22 = tl.where(tmp2, tmp19, tmp21) tmp23 = tl.load(in_ptr0 + ((4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp24 = tmp17 * tmp23 tmp25 = tl.load(in_ptr0 + (1 + (4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp26 = tl.load(in_ptr0 + (1 + (4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp27 = tmp25 * tmp26 tmp28 = tmp24 + tmp27 tmp29 = tl.load(in_ptr0 + (2 + (4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp30 = tl.load(in_ptr0 + (2 + (4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp31 = tmp29 * tmp30 tmp32 = tmp28 + tmp31 tmp33 = tl.load(in_ptr0 + (3 + (4*x0) + (16*tmp16) + (64*x2)), xmask, eviction_policy='evict_last') tmp34 = tl.load(in_ptr0 + (3 + (4*x0) + (16*tmp22) + (64*x2)), xmask, eviction_policy='evict_last') tmp35 = tmp33 * tmp34 tmp36 = tmp32 + tmp35 tl.store(out_ptr0 + (x3), tmp36, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [p, q, inner_product, sum_1], Original ATen: [aten.index, aten.mul, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_index_mul_sum_0.run(arg0_1, buf0, 96, grid=grid(96), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_index_mul_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 96
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 6
    x0 = xindex % 4
    x2 = xindex // 24
    x3 = xindex
    tmp0 = x1
    tmp1 = tl.full([1], 3, tl.int64)
    tmp2 = tmp0 < tmp1
    tmp3 = tl.full([1], 1, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.full([1], 2, tl.int64)
    tmp6 = tmp0 < tmp5
    tmp7 = tl.full([1], 0, tl.int64)
    tmp8 = tl.where(tmp6, tmp7, tmp7)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tl.full([1], 4, tl.int64)
    tmp11 = tmp0 < tmp10
    tmp12 = tl.full([1], 5, tl.int64)
    tmp13 = tmp0 < tmp12
    tmp14 = tl.where(tmp13, tmp3, tmp5)
    tmp15 = tl.where(tmp11, tmp3, tmp14)
    tmp16 = tl.where(tmp2, tmp9, tmp15)
    tmp17 = tl.load(in_ptr0 + (4 * x0 + 16 * tmp16 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp18 = tl.where(tmp6, tmp5, tmp1)
    tmp19 = tl.where(tmp4, tmp3, tmp18)
    tmp20 = tl.where(tmp13, tmp1, tmp1)
    tmp21 = tl.where(tmp11, tmp5, tmp20)
    tmp22 = tl.where(tmp2, tmp19, tmp21)
    tmp23 = tl.load(in_ptr0 + (4 * x0 + 16 * tmp22 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp24 = tmp17 * tmp23
    tmp25 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp26 = tl.load(in_ptr0 + (1 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp27 = tmp25 * tmp26
    tmp28 = tmp24 + tmp27
    tmp29 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr0 + (2 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp31 = tmp29 * tmp30
    tmp32 = tmp28 + tmp31
    tmp33 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * tmp16 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr0 + (3 + 4 * x0 + 16 * tmp22 + 64 * x2), xmask, eviction_policy='evict_last')
    tmp35 = tmp33 * tmp34
    tmp36 = tmp32 + tmp35
    tl.store(out_ptr0 + x3, tmp36, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 6, 4), (24, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_index_mul_sum_0[grid(96)](arg0_1, buf0, 96, XBLOCK=128, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class InnerProductLayerNew(nn.Module):
    """InnerProduct Layer used in PNN that computes the element-wise
    product or inner product between feature vectors.
    """

    def __init__(self, num_feature_field, device):
        """
        Args:
            num_feature_field (int): number of feature fields.
            device (torch.device): device object of the model.
        """
        super(InnerProductLayerNew, self).__init__()
        self.num_feature_field = num_feature_field

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
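The where-ladders in triton_poi_fused_index_mul_sum_0 (tmp16 and tmp22) hard-code the pair index lists rows = [0, 0, 0, 1, 1, 2] and cols = [1, 2, 3, 2, 3, 3], i.e. the 6 unordered field pairs for 4 fields. An eager-mode sketch of the same computation (inner_product_reference is a hypothetical name; shapes assume the (4, 4, 4, 4) input guarded above):

import torch
from itertools import combinations

def inner_product_reference(x):
    # x: (B, F, D1, D2); pair up fields, inner product over the last axis
    rows, cols = zip(*combinations(range(x.shape[1]), 2))
    p = x[:, list(rows)]            # (B, F*(F-1)/2, D1, D2)
    q = x[:, list(cols)]
    return (p * q).sum(dim=-1)      # (4, 6, 4) for the guarded input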
dreaming-qin/RecBole
InnerProductLayer
false
12,312
[ "MIT" ]
0
d6de39521484ded60c387ca604abaf86310acdbe
https://github.com/dreaming-qin/RecBole/tree/d6de39521484ded60c387ca604abaf86310acdbe
ConcatSquashConv2d
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/ej/cejcyvz7tzjp6tek7y3hc7b5nw6pshpwovikzdozuaem5aw7jmsd.py # Topologically Sorted Source Nodes: [conv2d, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add] # Source node to ATen node mapping: # add => add # conv2d => convolution # mul => mul # Graph fragment: # %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %view_1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %view_3), kwargs = {}) triton_poi_fused_add_convolution_mul_0 = async_compile.triton('triton_poi_fused_add_convolution_mul_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_convolution_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 4) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp2 * tmp4 tmp7 = tmp5 + tmp6 tl.store(in_out_ptr0 + (x3), tmp2, xmask) tl.store(out_ptr0 + (x3), tmp7, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (4, 1), (1, 1)) assert_size_stride(primals_6, (4, ), (1, )) assert_size_stride(primals_7, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm] extern_kernels.addmm(primals_6, primals_4, reinterpret_tensor(primals_5, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = empty_strided_cuda((1, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm] extern_kernels.mm(primals_4, reinterpret_tensor(primals_7, (1, 4), (1, 1), 0), out=buf3) del primals_7 buf1 = buf0; del buf0 # reuse buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) # Topologically Sorted Source Nodes: [conv2d, mul, add], Original ATen: [aten.convolution, aten.mul, aten.add] stream0 = get_raw_stream(0) triton_poi_fused_add_convolution_mul_0.run(buf1, primals_2, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0) del buf3 del primals_2 return (buf4, primals_1, primals_3, primals_4, buf1, buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((1, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main 
compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_add_convolution_mul_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = xindex // 4 % 4 tmp0 = tl.load(in_out_ptr0 + x3, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tl.sigmoid(tmp3) tmp5 = tmp2 * tmp4 tmp7 = tmp5 + tmp6 tl.store(in_out_ptr0 + x3, tmp2, xmask) tl.store(out_ptr0 + x3, tmp7, xmask) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7) = args args.clear() assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (1, 1), (1, 1)) assert_size_stride(primals_5, (4, 1), (1, 1)) assert_size_stride(primals_6, (4,), (1,)) assert_size_stride(primals_7, (4, 1), (1, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 4, 2, 2), (16, 4, 2, 1)) buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_6, primals_4, reinterpret_tensor( primals_5, (1, 4), (1, 1), 0), alpha=1, beta=1, out=buf2) del primals_5 del primals_6 buf3 = empty_strided_cuda((1, 4), (4, 1), torch.float32) extern_kernels.mm(primals_4, reinterpret_tensor(primals_7, (1, 4), (1, 1), 0), out=buf3) del primals_7 buf1 = buf0 del buf0 buf4 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32) get_raw_stream(0) triton_poi_fused_add_convolution_mul_0[grid(64)](buf1, primals_2, buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1) del buf3 del primals_2 return buf4, primals_1, primals_3, primals_4, buf1, buf2 class ConcatSquashConv2dNew(nn.Module): def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False): super(ConcatSquashConv2dNew, self).__init__() module = nn.ConvTranspose2d if transpose else nn.Conv2d self._layer = module(dim_in, dim_out, kernel_size=ksize, stride= stride, padding=padding, dilation=dilation, groups=groups, bias =bias) self._hyper_gate = nn.Linear(1, dim_out) self._hyper_bias = nn.Linear(1, dim_out, bias=False) def forward(self, input_0, input_1): primals_1 = self._layer.weight primals_2 = self._layer.bias primals_5 = self._hyper_gate.weight primals_6 = self._hyper_gate.bias primals_7 = self._hyper_bias.weight primals_4 = input_0 primals_3 = input_1 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return output[0]
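In ConcatSquashConv2dNew.forward, input_0 is the (1, 1) time tensor (primals_4) and input_1 is the image batch; the fused epilogue multiplies the convolution output by a per-channel sigmoid gate and adds a per-channel bias, both computed from t. A reconstructed eager equivalent (concat_squash_reference is a hypothetical helper; this mirrors FFJORD's published forward, written here as a standalone numeric check):

import torch

def concat_squash_reference(module, t, x):
    # t: (1, 1), x: (B, C, H, W); gate and bias broadcast over channels
    gate = torch.sigmoid(module._hyper_gate(t.view(1, 1))).view(1, -1, 1, 1)
    bias = module._hyper_bias(t.view(1, 1)).view(1, -1, 1, 1)
    return module._layer(x) * gate + bias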
ClaraBing/ffjord
ConcatSquashConv2d
false
13,509
[ "MIT" ]
518
a97c34ff546a063316828f53bd041555e663428d
https://github.com/ClaraBing/ffjord/tree/a97c34ff546a063316828f53bd041555e663428d
EncoderImageWeightNormPrecomp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/vx/cvxzmthv4i2niuhjkx7pdwegys74ubmwp36fuzpk743r7lkqg4tm.py # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul] # Source node to ATen node mapping: # _weight_norm => div, mul, pow_1, pow_2, sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {}) # %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {}) # %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {}) triton_per_fused_div_mul_norm_0 = async_compile.triton('triton_per_fused_div_mul_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 16], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {4: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=(4,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': 
False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1 rnumel = 16 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] roffset = 0 rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + (r0), None) tmp6 = tl.load(in_ptr1 + (0)) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp5, None) tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp9, None) ''', device_str='cuda') # kernel path: runs/run_shard_4/inductor_cache/x2/cx2e5rdeusxxcqfojnklfpvernnjnuzqu3eanmgcyclgrncdxyae.py # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div] # Source node to ATen node mapping: # X => div_1 # norm => add # pow_1 => pow_3 # sqrt => sqrt # sum_1 => sum_2 # Graph fragment: # %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {}) # %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [-1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_2,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-08), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %add), kwargs = {}) triton_poi_fused_add_div_pow_sqrt_sum_1 = async_compile.triton('triton_poi_fused_add_div_pow_sqrt_sum_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_pow_sqrt_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def 
triton_poi_fused_add_div_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + (x2), tmp15, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4, ), (1, )) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0; del buf0 # reuse buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [_weight_norm], Original ATen: [aten.norm, aten.div, aten.mul] stream0 = get_raw_stream(0) triton_per_fused_div_mul_norm_0.run(buf1, primals_2, primals_1, buf2, 1, 16, grid=grid(1), stream=stream0) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [features], Original ATen: [aten.addmm] extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_3 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, sqrt, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.add, aten.div] triton_poi_fused_add_div_pow_sqrt_sum_1.run(buf3, buf4, 256, grid=grid(256), stream=stream0) return (buf4, buf2, primals_1, primals_2, buf1, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), buf3, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice from collections import OrderedDict import torch.nn as nn import torch.nn.init from torch.nn.utils.weight_norm import weight_norm assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_per_fused_div_mul_norm_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp6 = tl.load(in_ptr1 + 0) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = libdevice.sqrt(tmp4) tmp8 = tmp7 / tmp5 tmp9 = tmp0 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp5, None) tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp9, None) @triton.jit def triton_poi_fused_add_div_pow_sqrt_sum_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = 1e-08 tmp14 = tmp12 + tmp13 tmp15 = tmp0 / tmp14 tl.store(out_ptr0 + x2, tmp15, xmask) def call(args): primals_1, primals_2, primals_3, primals_4 = args args.clear() assert_size_stride(primals_1, (), ()) assert_size_stride(primals_2, (4, 4), (4, 1)) assert_size_stride(primals_3, (4,), (1,)) assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((), (), torch.float32) buf1 = buf0 del buf0 buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_mul_norm_0[grid(1)](buf1, primals_2, primals_1, buf2, 1, 16, XBLOCK=1, num_warps=2, num_stages=1) buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_3, reinterpret_tensor(primals_4, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3) del primals_3 buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) triton_poi_fused_add_div_pow_sqrt_sum_1[grid(256)](buf3, buf4, 256, XBLOCK=128, num_warps=4, num_stages=1) return buf4, buf2, primals_1, primals_2, buf1, reinterpret_tensor(primals_4 , (64, 4), (4, 1), 0), buf3 def l2norm(X, dim, eps=1e-08): """L2-normalize columns of X """ norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps X = torch.div(X, norm) return X class 
EncoderImageWeightNormPrecompNew(nn.Module): def __init__(self, img_dim, embed_size, no_imgnorm=False): super(EncoderImageWeightNormPrecompNew, self).__init__() self.embed_size = embed_size self.no_imgnorm = no_imgnorm self.fc = weight_norm(nn.Linear(img_dim, embed_size), dim=None) def load_state_dict(self, state_dict): """Copies parameters. overwritting the default one to accept state_dict from Full model """ own_state = self.state_dict() new_state = OrderedDict() for name, param in state_dict.items(): if name in own_state: new_state[name] = param super(EncoderImageWeightNormPrecompNew, self).load_state_dict(new_state ) def forward(self, input_0): primals_3 = self.fc.bias primals_1 = self.fc.weight_g primals_2 = self.fc.weight_v primals_4 = input_0 output = call([primals_1, primals_2, primals_3, primals_4]) return output[0]
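Note that the traced call above always applies the L2 normalization: the no_imgnorm branch was captured as False and is baked into the graph (the pow/sum/sqrt/add/div fusion with eps = 1e-08). A self-contained eager sketch of the same forward (encoder_image_reference is a hypothetical name; the norm is inlined rather than reusing the l2norm helper defined above):

import torch

def encoder_image_reference(module, images):
    # images: (..., img_dim); weight-normed linear projection, then L2-normalize
    features = module.fc(images)
    norm = features.pow(2).sum(dim=-1, keepdim=True).sqrt() + 1e-08
    return features / norm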
ChopinSharp/SCAN
EncoderImageWeightNormPrecomp
false
4,996
[ "Apache-2.0" ]
1
4a165b2aeb3007685054d0c550540893b2006b17
https://github.com/ChopinSharp/SCAN/tree/4a165b2aeb3007685054d0c550540893b2006b17
Transition
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/nh/cnhx37tsffx4r7taj3xi72s7yfpnnccem24fupfbht6b7bzliavu.py # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.gelu] # Source node to ATen node mapping: # x_1 => add, erf, mul, mul_1, mul_2 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.5), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.7071067811865476), kwargs = {}) # %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {}) triton_poi_fused_gelu_0 = async_compile.triton('triton_poi_fused_gelu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) 
@triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + (x0), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.gelu] stream0 = get_raw_stream(0) triton_poi_fused_gelu_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_gelu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + x0, xmask) tmp1 = 0.5 tmp2 = tmp0 * tmp1 tmp3 = 0.7071067811865476 tmp4 = tmp0 * tmp3 tmp5 = libdevice.erf(tmp4) tmp6 = 1.0 tmp7 = tmp5 + tmp6 tmp8 = tmp2 * tmp7 tl.store(out_ptr0 + x0, tmp8, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0 ), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_gelu_0[grid(256)](buf0, buf1, 256, XBLOCK=256, num_warps=4, num_stages=1) return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0 class TransitionNew(nn.Module): def __init__(self, in_features, out_features, act_layer=nn.GELU): super(TransitionNew, self).__init__() self.act = act_layer() self.linear = nn.Linear(in_features, out_features) def forward(self, input_0): primals_1 = self.linear.weight primals_2 = self.linear.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
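The fused kernel evaluates the exact erf-based GELU, 0.5 * x * (1 + erf(x / sqrt(2))), which is nn.GELU's default; the graph applies the linear layer first and the activation second. A reconstructed eager equivalent (transition_reference is a hypothetical name):

import torch

def transition_reference(module, x):
    # addmm (linear) followed by exact GELU, matching the compiled graph above
    return module.act(module.linear(x))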
Roxbili/T2T-ViT
Transition
false
9,445
[ "BSD-3-Clause-Clear" ]
0
c5442bc560ea15b421130f13e31c4b68f52c1e5a
https://github.com/Roxbili/T2T-ViT/tree/c5442bc560ea15b421130f13e31c4b68f52c1e5a
Squash
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/ym/cym3ccudngmvatoe6w7myo62gop3lu5z7s5oobh4dsa5ywnpodvj.py # Topologically Sorted Source Nodes: [pow_1, s2, add, truediv, add_1, sqrt, truediv_1, mul], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul] # Source node to ATen node mapping: # add => add # add_1 => add_1 # mul => mul # pow_1 => pow_1 # s2 => sum_1 # sqrt => sqrt # truediv => div # truediv_1 => div_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {}) # %sum_1 : [num_users=3] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %add), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-08), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %sqrt), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {}) triton_poi_fused_add_div_mul_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_add_div_mul_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 
'triton_poi_fused_add_div_mul_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 4) x2 = xindex tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last') tmp8 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last') tmp14 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 1.0 tmp12 = tmp10 + tmp11 tmp13 = tmp10 / tmp12 tmp15 = 1e-08 tmp16 = tmp10 + tmp15 tmp17 = libdevice.sqrt(tmp16) tmp18 = tmp14 / tmp17 tmp19 = tmp13 * tmp18 tl.store(out_ptr0 + (x2), tmp19, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, s2, add, truediv, add_1, sqrt, truediv_1, mul], Original ATen: [aten.pow, aten.sum, aten.add, aten.div, aten.sqrt, aten.mul] stream0 = get_raw_stream(0) triton_poi_fused_add_div_mul_pow_sqrt_sum_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0) del arg0_1 return (buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
import torch.utils.data
import torch.nn.functional
import torch.autograd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mul_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp8 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp14 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tmp0 * tmp0
    tmp3 = tmp2 * tmp2
    tmp4 = tmp1 + tmp3
    tmp6 = tmp5 * tmp5
    tmp7 = tmp4 + tmp6
    tmp9 = tmp8 * tmp8
    tmp10 = tmp7 + tmp9
    tmp11 = 1.0
    tmp12 = tmp10 + tmp11
    tmp13 = tmp10 / tmp12
    tmp15 = 1e-08
    tmp16 = tmp10 + tmp15
    tmp17 = libdevice.sqrt(tmp16)
    tmp18 = tmp14 / tmp17
    tmp19 = tmp13 * tmp18
    tl.store(out_ptr0 + x2, tmp19, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mul_pow_sqrt_sum_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


class SquashNew(Module):
    r"""
    ## Squash

    This is the **squashing** function from the paper, given by equation $(1)$:

    $$\mathbf{v}_j = \frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}
    \frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$$

    $\frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$ normalizes the length of
    all the capsules, whilst
    $\frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}$
    shrinks the capsules that have a length smaller than one.
    """

    def __init__(self, epsilon=1e-08):
        super().__init__()
        self.epsilon = epsilon

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
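A reconstructed eager squash matching the fused kernel term by term: s2 / (1 + s2) shrinks short capsules while s / sqrt(s2 + eps) normalizes direction (squash_reference is a hypothetical name; epsilon defaults to the module's 1e-08):

import torch

def squash_reference(s, epsilon=1e-08):
    # s: (..., D); capsule squashing over the last axis
    s2 = (s ** 2).sum(dim=-1, keepdim=True)
    return (s2 / (1 + s2)) * (s / torch.sqrt(s2 + epsilon))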
ppvalluri09/annotated_deep_learning_paper_implementations
Squash
false
11,078
[ "MIT" ]
0
387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
https://github.com/ppvalluri09/annotated_deep_learning_paper_implementations/tree/387b6dfd1ef1f6d295e9394c24b5798071d9a3e4
ContrastiveLoss
# AOT ID: ['0_inference'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_1/inductor_cache/v2/cv2ohk4vpat6jimblbxldlcmj6xwbdtpffu3zhdixmnvm47unics.py # Topologically Sorted Source Nodes: [sub, pow_1, distances], Original ATen: [aten.sub, aten.pow, aten.sum] # Source node to ATen node mapping: # distances => sum_1 # pow_1 => pow_1 # sub => sub # Graph fragment: # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {}) # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {}) # %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {}) triton_poi_fused_pow_sub_sum_0 = async_compile.triton('triton_poi_fused_pow_sub_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 64 xoffset = 
tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 16 x1 = (xindex // 16) x2 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask) tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask) tmp4 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask) tmp5 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask) tmp9 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask) tmp10 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask) tmp14 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask) tmp15 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask) tmp2 = tmp0 - tmp1 tmp3 = tmp2 * tmp2 tmp6 = tmp4 - tmp5 tmp7 = tmp6 * tmp6 tmp8 = tmp3 + tmp7 tmp11 = tmp9 - tmp10 tmp12 = tmp11 * tmp11 tmp13 = tmp8 + tmp12 tmp16 = tmp14 - tmp15 tmp17 = tmp16 * tmp16 tmp18 = tmp13 + tmp17 tl.store(out_ptr0 + (x2), tmp18, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_1/inductor_cache/zr/czr2vrtmp2zzmoldsxojgyuiyfeueaymvvtixpuppz45iypjtjb3.py # Topologically Sorted Source Nodes: [mul, mul_1, add, add_1, sqrt, sub_1, relu, pow_2, mul_2, add_2, losses, mean], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.rsub, aten.relu, aten.pow, aten.mean] # Source node to ATen node mapping: # add => add # add_1 => add_1 # add_2 => add_2 # losses => mul_3 # mean => mean # mul => mul # mul_1 => mul_1 # mul_2 => mul_2 # pow_2 => pow_2 # relu => relu # sqrt => sqrt # sub_1 => sub_1 # Graph fragment: # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, %sum_1), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg2_1, -1), kwargs = {}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1e-09), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {}) # %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (4, %sqrt), kwargs = {}) # %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%sub_1,), kwargs = {}) # %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%relu, 2), kwargs = {}) # %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %pow_2), kwargs = {}) # %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_2), kwargs = {}) # %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_2, 0.5), kwargs = {}) # %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_3,), kwargs = {}) triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1 = async_compile.triton('triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.persistent_reduction( size_hints=[1, 256], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, 
multi_processor_count=132), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False} ) @triton.jit def triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): xnumel = 1 XBLOCK: tl.constexpr = 1 rnumel = 256 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK xindex = tl.full([1], xoffset, tl.int32) xmask = tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] roffset = 0 rmask = tl.full([RBLOCK], True, tl.int1) r2 = rindex r0 = rindex % 64 tmp0 = tl.load(in_ptr0 + (r2), None) tmp1 = tl.load(in_ptr1 + (r0), None, eviction_policy='evict_last') tmp2 = tmp0 * tmp1 tmp3 = -1.0 tmp4 = tmp0 * tmp3 tmp5 = 1.0 tmp6 = tmp4 + tmp5 tmp7 = 1e-09 tmp8 = tmp1 + tmp7 tmp9 = libdevice.sqrt(tmp8) tmp10 = 4.0 tmp11 = tmp10 - tmp9 tmp12 = tl.full([1], 0, tl.int32) tmp13 = triton_helpers.maximum(tmp12, tmp11) tmp14 = tmp13 * tmp13 tmp15 = tmp6 * tmp14 tmp16 = tmp2 + tmp15 tmp17 = 0.5 tmp18 = tmp16 * tmp17 tmp19 = tl.broadcast_to(tmp18, [RBLOCK]) tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0)) tmp22 = 256.0 tmp23 = tmp21 / tmp22 tl.debug_barrier() tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [sub, pow_1, distances], Original ATen: [aten.sub, aten.pow, aten.sum] stream0 = get_raw_stream(0) triton_poi_fused_pow_sub_sum_0.run(arg0_1, arg1_1, buf0, 64, grid=grid(64), stream=stream0) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((), (), torch.float32) buf2 = buf1; del buf1 # reuse # Topologically Sorted Source Nodes: [mul, mul_1, add, add_1, sqrt, sub_1, relu, pow_2, mul_2, add_2, losses, mean], Original ATen: [aten.mul, aten.add, aten.sqrt, aten.rsub, aten.relu, aten.pow, aten.mean] triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1.run(buf2, arg2_1, buf0, 1, 256, grid=grid(1), stream=stream0) del arg2_1 del buf0 return (buf2, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([arg0_1, arg1_1, arg2_1]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from 
torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_pow_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 16
    x1 = xindex // 16
    x2 = xindex
    tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
    tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
    tmp4 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
    tmp5 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
    tmp9 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
    tmp10 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
    tmp14 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
    tmp15 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
    tmp2 = tmp0 - tmp1
    tmp3 = tmp2 * tmp2
    tmp6 = tmp4 - tmp5
    tmp7 = tmp6 * tmp6
    tmp8 = tmp3 + tmp7
    tmp11 = tmp9 - tmp10
    tmp12 = tmp11 * tmp11
    tmp13 = tmp8 + tmp12
    tmp16 = tmp14 - tmp15
    tmp17 = tmp16 * tmp16
    tmp18 = tmp13 + tmp17
    tl.store(out_ptr0 + x2, tmp18, xmask)


@triton.jit
def triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1(in_out_ptr0,
    in_ptr0, in_ptr1, xnumel, rnumel):
    XBLOCK: tl.constexpr = 1
    RBLOCK: tl.constexpr = 256
    xoffset = tl.program_id(0) * XBLOCK
    tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)
    r2 = rindex
    r0 = rindex % 64
    tmp0 = tl.load(in_ptr0 + r2, None)
    tmp1 = tl.load(in_ptr1 + r0, None, eviction_policy='evict_last')
    tmp2 = tmp0 * tmp1
    tmp3 = -1.0
    tmp4 = tmp0 * tmp3
    tmp5 = 1.0
    tmp6 = tmp4 + tmp5
    tmp7 = 1e-09
    tmp8 = tmp1 + tmp7
    tmp9 = libdevice.sqrt(tmp8)
    tmp10 = 4.0
    tmp11 = tmp10 - tmp9
    tmp12 = tl.full([1], 0, tl.int32)
    tmp13 = triton_helpers.maximum(tmp12, tmp11)
    tmp14 = tmp13 * tmp13
    tmp15 = tmp6 * tmp14
    tmp16 = tmp2 + tmp15
    tmp17 = 0.5
    tmp18 = tmp16 * tmp17
    tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
    tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
    tmp22 = 256.0
    tmp23 = tmp21 / tmp22
    tl.debug_barrier()
    tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)


def call(args):
    arg0_1, arg1_1, arg2_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_pow_sub_sum_0[grid(64)](arg0_1, arg1_1, buf0, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        del arg1_1
        buf1 = empty_strided_cuda((), (), torch.float32)
        buf2 = buf1
        del buf1
        triton_per_fused_add_mean_mul_pow_relu_rsub_sqrt_1[grid(1)](buf2,
            arg2_1, buf0, 1, 256, num_warps=2, num_stages=1)
        del arg2_1
        del buf0
    return buf2,


class ContrastiveLossNew(nn.Module):
    """
    Contrastive loss
    Takes embeddings of two samples and a target label == 1 if samples are
    from the same class and label == 0 otherwise
    """

    def __init__(self, margin):
        super(ContrastiveLossNew, self).__init__()
        self.margin = margin
        self.eps = 1e-09

    def forward(self, input_0, input_1, input_2):
        arg0_1 = input_0
        arg1_1 = input_1
        arg2_1 = input_2
        output = call([arg0_1, arg1_1, arg2_1])
        return output[0]
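A quick way to sanity-check the fused version is to compare it against the eager contrastive-loss formula it was compiled from. A minimal sketch, assuming a CUDA device; note the kernel bakes in margin = 4.0 and eps = 1e-09, so the check only holds for that margin:

import torch
import torch.nn.functional as F

crit = ContrastiveLossNew(margin=4)
e1 = torch.rand(4, 4, 4, 4, device='cuda')
e2 = torch.rand(4, 4, 4, 4, device='cuda')
target = torch.randint(0, 2, (4, 4, 4, 4), device='cuda').float()
loss = crit(e1, e2, target)

# eager reference: squared distances over dim 1, hinge term on negative pairs
d2 = (e1 - e2).pow(2).sum(1)
ref = (0.5 * (target * d2 +
              (1 - target) * F.relu(4.0 - (d2 + 1e-09).sqrt()) ** 2)).mean()
torch.testing.assert_close(loss, ref)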
EmmaW8/EISNet
ContrastiveLoss
false
8,038
[ "MIT" ]
40
97c420d6763c5f825e88ed732edee4e75f3b947e
https://github.com/EmmaW8/EISNet/tree/97c420d6763c5f825e88ed732edee4e75f3b947e
ResBlock
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_0/inductor_cache/vv/cvvvykjkjppmkrmwigtttsyehcevbj5t7jythhbra7doe7uqz5nb.py # Topologically Sorted Source Nodes: [conv2d, residual], Original ATen: [aten.convolution, aten.leaky_relu] # Source node to ATen node mapping: # conv2d => convolution # residual => gt, mul, where # Graph fragment: # %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {}) # %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {}) triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 
'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 64 tmp0 = tl.load(in_out_ptr0 + (x3), None) tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tl.store(in_out_ptr0 + (x3), tmp7, None) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/sx/csxcria5xwi4ihtg7e37ggfcyrrviugfpusq5rclo6idodbufbsp.py # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.cat] # Source node to ATen node mapping: # input_1 => cat # Graph fragment: # %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_3, %where_1], 1), kwargs = {}) triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4352 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x1 = (xindex // 16) % 68 x0 = xindex % 16 x2 = (xindex // 1088) x3 = xindex tmp0 = x1 tmp1 = tl.full([1], 0, tl.int64) tmp2 = tmp0 >= tmp1 tmp3 = tl.full([1], 4, tl.int64) tmp4 = tmp0 < tmp3 tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0) tmp6 = tmp0 >= tmp3 tmp7 = tl.full([1], 68, tl.int64) tmp8 = tmp0 < tmp7 tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (1024*x2)), tmp6 & xmask, other=0.0) tmp10 = tl.load(in_ptr2 + ((-4) + x1), tmp6 & xmask, eviction_policy='evict_last', other=0.0) tmp11 = tmp9 + tmp10 tmp12 = 0.0 tmp13 = tmp11 > tmp12 tmp14 = 0.2 tmp15 = tmp11 * tmp14 tmp16 = tl.where(tmp13, tmp11, tmp15) tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype) tmp18 = tl.where(tmp6, tmp16, tmp17) tmp19 = tl.where(tmp4, tmp5, tmp18) tl.store(out_ptr0 + (x3), tmp19, 
xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/w5/cw5gytijzzkwnfpq2a2axdsj4pfxgxmwiuzizuyd4bw5uwnanzw7.py # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] # Source node to ATen node mapping: # out => convolution_2 # Graph fragment: # %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_6, %primals_7, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x1 = (xindex // 16) % 4 tmp0 = tl.load(in_out_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x3), tmp2, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_0/inductor_cache/uq/cuqaxink657vmvuhhizwmmevtg2uvs4a3dm6naooaakhjpteqtvc.py # Topologically Sorted Source Nodes: [conv2d_1, residual_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] # Source node to ATen node mapping: # conv2d_1 => convolution_1 # residual_1 => gt_1, mul_1, where_1 # Graph fragment: # %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {}) # %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {}) # %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {}) # %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%where_1, 0), kwargs = {}) triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3 = 
async_compile.triton('triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4096], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 4096 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x3 = xindex x1 = (xindex // 16) % 64 tmp0 = tl.load(in_ptr0 + (x3), None) tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = 0.0 tmp4 = tmp2 > tmp3 tmp5 = 0.2 tmp6 = tmp2 * tmp5 tmp7 = tl.where(tmp4, tmp2, tmp6) tmp8 = tmp7 > tmp3 tl.store(out_ptr0 + (x3), tmp8, None) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args args.clear() assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1)) assert_size_stride(primals_2, (64, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1)) assert_size_stride(primals_5, (64, ), (1, )) assert_size_stride(primals_6, (4, 68, 3, 3), (612, 9, 3, 1)) assert_size_stride(primals_7, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv2d, residual], Original ATen: [aten.convolution, aten.leaky_relu] stream0 = get_raw_stream(0) triton_poi_fused_convolution_leaky_relu_0.run(buf1, primals_2, 4096, grid=grid(4096), stream=stream0) del primals_2 # Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution] buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) 
assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1)) buf3 = empty_strided_cuda((4, 68, 4, 4), (1088, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [input_1], Original ATen: [aten.cat] triton_poi_fused_cat_1.run(primals_3, buf2, primals_5, buf3, 4352, grid=grid(4352), stream=stream0) # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None) assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1)) buf5 = buf4; del buf4 # reuse # Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution] triton_poi_fused_convolution_2.run(buf5, primals_7, 256, grid=grid(256), stream=stream0) del primals_7 buf6 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool) # Topologically Sorted Source Nodes: [conv2d_1, residual_1], Original ATen: [aten.convolution, aten.leaky_relu, aten.leaky_relu_backward] triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3.run(buf2, primals_5, buf6, 4096, grid=grid(4096), stream=stream0) del buf2 del primals_5 return (buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf6, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((64, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((4, 68, 3, 3), (612, 9, 3, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.utils.data
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 64
    tmp0 = tl.load(in_out_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tl.store(in_out_ptr0 + x3, tmp7, None)


@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 4352
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 16 % 68
    x0 = xindex % 16
    x2 = xindex // 1088
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tmp0 >= tmp3
    tl.full([1], 68, tl.int64)
    tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 1024 * x2), tmp6 &
        xmask, other=0.0)
    tmp10 = tl.load(in_ptr2 + (-4 + x1), tmp6 & xmask, eviction_policy=
        'evict_last', other=0.0)
    tmp11 = tmp9 + tmp10
    tmp12 = 0.0
    tmp13 = tmp11 > tmp12
    tmp14 = 0.2
    tmp15 = tmp11 * tmp14
    tmp16 = tl.where(tmp13, tmp11, tmp15)
    tmp17 = tl.full(tmp16.shape, 0.0, tmp16.dtype)
    tmp18 = tl.where(tmp6, tmp16, tmp17)
    tmp19 = tl.where(tmp4, tmp5, tmp18)
    tl.store(out_ptr0 + x3, tmp19, xmask)


@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 16 % 4
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3(in_ptr0,
    in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 64
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = 0.0
    tmp4 = tmp2 > tmp3
    tmp5 = 0.2
    tmp6 = tmp2 * tmp5
    tmp7 = tl.where(tmp4, tmp2, tmp6)
    tmp8 = tmp7 > tmp3
    tl.store(out_ptr0 + x3, tmp8, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (64, 4, 3, 3), (36, 9, 3, 1))
    assert_size_stride(primals_2, (64,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_5, (64,), (1,))
    assert_size_stride(primals_6, (4, 68, 3, 3), (612, 9, 3, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
            1), padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf0, (4, 64, 4, 4), (1024, 16, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_leaky_relu_0[grid(4096)](buf1,
            primals_2, 4096, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 64, 4, 4), (1024, 16, 4, 1))
        buf3 = empty_strided_cuda((4, 68, 4, 4), (1088, 16, 4, 1),
            torch.float32)
        triton_poi_fused_cat_1[grid(4352)](primals_3, buf2, primals_5,
            buf3, 4352, XBLOCK=256, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
            padding=(1, 1), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 4, 4, 4), (64, 16, 4, 1))
        buf5 = buf4
        del buf4
        triton_poi_fused_convolution_2[grid(256)](buf5, primals_7, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_7
        buf6 = empty_strided_cuda((4, 64, 4, 4), (1024, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_leaky_relu_leaky_relu_backward_3[grid(
            4096)](buf2, primals_5, buf6, 4096, XBLOCK=256, num_warps=4,
            num_stages=1)
        del buf2
        del primals_5
    return buf5, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf6


class ResBlockNew(nn.Module):

    def __init__(self, channel_in, channel_out):
        super(ResBlockNew, self).__init__()
        feature = 64
        self.conv1 = nn.Conv2d(channel_in, feature, kernel_size=3, padding=1)
        self.relu1 = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        self.conv2 = nn.Conv2d(feature, feature, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(feature + channel_in, channel_out,
            kernel_size=3, padding=1)

    def forward(self, input_0):
        primals_1 = self.conv1.weight
        primals_2 = self.conv1.bias
        primals_4 = self.conv2.weight
        primals_5 = self.conv2.bias
        primals_6 = self.conv3.weight
        primals_7 = self.conv3.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0]
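Since this record's original field is the raw Inductor dump rather than the module source, it helps to see the eager block the kernel graph corresponds to. The sketch below is a reconstruction read off the fused kernels (the 0.2 LeakyReLU slope and the input/residual channel concatenation come from triton_poi_fused_convolution_leaky_relu_0 and triton_poi_fused_cat_1); `ResBlockEager` is hypothetical, not the upstream source:

import torch
import torch.nn as nn

class ResBlockEager(nn.Module):
    def __init__(self, channel_in, channel_out, feature=64):
        super().__init__()
        self.conv1 = nn.Conv2d(channel_in, feature, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(feature, feature, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(feature + channel_in, channel_out,
                               kernel_size=3, padding=1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2)

    def forward(self, x):
        residual = self.lrelu(self.conv1(x))
        residual = self.lrelu(self.conv2(residual))
        # skip connection by channel concatenation, then a fusing conv
        return self.conv3(torch.cat((x, residual), dim=1))

# parity check against the compiled module (assumes a CUDA device)
eager = ResBlockEager(4, 4).cuda()
fused = ResBlockNew(4, 4).cuda()
fused.load_state_dict(eager.state_dict())
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(fused(x), eager(x))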
WestCityInstitute/InvDN
ResBlock
false
14,608
[ "Apache-2.0" ]
122
3846cf3548ccf6690e58be3aafe1f6d98c56b90d
https://github.com/WestCityInstitute/InvDN/tree/3846cf3548ccf6690e58be3aafe1f6d98c56b90d
MixtureDensityHead
from _paritybench_helpers import _mock_config
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.distributions import Categorical

# Constants for the Gaussian density used in gaussian_probability.
ONEOVERSQRT2PI = 1.0 / math.sqrt(2 * math.pi)
LOG2PI = math.log(2 * math.pi)


class MixtureDensityHead(nn.Module):

    def __init__(self, config: 'DictConfig', **kwargs):
        self.hparams = config
        super().__init__()
        self._build_network()

    def _build_network(self):
        self.pi = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
        nn.init.normal_(self.pi.weight)
        self.sigma = nn.Linear(self.hparams.input_dim, self.hparams.
            num_gaussian, bias=self.hparams.sigma_bias_flag)
        self.mu = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
        nn.init.normal_(self.mu.weight)
        if self.hparams.mu_bias_init is not None:
            for i, bias in enumerate(self.hparams.mu_bias_init):
                nn.init.constant_(self.mu.bias[i], bias)

    def forward(self, x):
        pi = self.pi(x)
        sigma = self.sigma(x)
        sigma = nn.ELU()(sigma) + 1 + 1e-15
        mu = self.mu(x)
        return pi, sigma, mu

    def gaussian_probability(self, sigma, mu, target, log=False):
        """Returns the probability of `target` given MoG parameters `sigma`
        and `mu`.

        Arguments:
            sigma (BxGxO): The standard deviation of the Gaussians. B is the
                batch size, G is the number of Gaussians, and O is the number
                of dimensions per Gaussian.
            mu (BxGxO): The means of the Gaussians. B is the batch size, G is
                the number of Gaussians, and O is the number of dimensions
                per Gaussian.
            target (BxI): A batch of target. B is the batch size and I is the
                number of input dimensions.

        Returns:
            probabilities (BxG): The probability of each point in the
                probability of the distribution in the corresponding
                sigma/mu index.
        """
        target = target.expand_as(sigma)
        if log:
            ret = -torch.log(sigma) - 0.5 * LOG2PI - 0.5 * torch.pow((
                target - mu) / sigma, 2)
        else:
            ret = ONEOVERSQRT2PI / sigma * torch.exp(-0.5 * ((target - mu) /
                sigma) ** 2)
        return ret

    def log_prob(self, pi, sigma, mu, y):
        log_component_prob = self.gaussian_probability(sigma, mu, y, log=True)
        log_mix_prob = torch.log(nn.functional.gumbel_softmax(pi, tau=self.
            hparams.softmax_temperature, dim=-1) + 1e-15)
        return torch.logsumexp(log_component_prob + log_mix_prob, dim=-1)

    def sample(self, pi, sigma, mu):
        """Draw samples from a MoG."""
        categorical = Categorical(pi)
        pis = categorical.sample().unsqueeze(1)
        sample = Variable(sigma.data.new(sigma.size(0), 1).normal_())
        sample = sample * sigma.gather(1, pis) + mu.gather(1, pis)
        return sample

    def generate_samples(self, pi, sigma, mu, n_samples=None):
        if n_samples is None:
            n_samples = self.hparams.n_samples
        samples = []
        softmax_pi = nn.functional.gumbel_softmax(pi, tau=self.hparams.
            softmax_temperature, dim=-1)
        assert (softmax_pi < 0).sum().item(
            ) == 0, 'pi parameter should not have negative'
        for _ in range(n_samples):
            samples.append(self.sample(softmax_pi, sigma, mu))
        samples = torch.cat(samples, dim=1)
        return samples

    def generate_point_predictions(self, pi, sigma, mu, n_samples=None):
        samples = self.generate_samples(pi, sigma, mu, n_samples)
        if self.hparams.central_tendency == 'mean':
            y_hat = torch.mean(samples, dim=-1)
        elif self.hparams.central_tendency == 'median':
            y_hat = torch.median(samples, dim=-1).values
        return y_hat.unsqueeze(1)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'config': _mock_config(input_dim=4, num_gaussian=4,
        sigma_bias_flag=4, mu_bias_init=[4, 4])}]
import math
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
from torch.autograd import Variable
from torch.distributions import Categorical

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor

# Constants for the Gaussian density used in gaussian_probability.
ONEOVERSQRT2PI = 1.0 / math.sqrt(2 * math.pi)
LOG2PI = math.log(2 * math.pi)


@triton.jit
def triton_poi_fused_add_elu_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 0.0
    tmp2 = tmp0 > tmp1
    tmp3 = 1.0
    tmp4 = tmp0 * tmp3
    tmp5 = libdevice.expm1(tmp4)
    tmp6 = tmp5 * tmp3
    tmp7 = tl.where(tmp2, tmp4, tmp6)
    tmp8 = tmp7 + tmp3
    tmp9 = 1e-15
    tmp10 = tmp8 + tmp9
    tl.store(out_ptr0 + x0, tmp10, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7) = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4, 4), (4, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 4), (4, 1))
    assert_size_stride(primals_7, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf1)
        del primals_4
        del primals_5
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_elu_0[grid(256)](buf1, buf2, 256, XBLOCK=256,
            num_warps=4, num_stages=1)
        buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_7, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4),
            0), alpha=1, beta=1, out=buf3)
        del primals_6
        del primals_7
    return reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), buf2, reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0
        ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1


class MixtureDensityHeadNew(nn.Module):

    def __init__(self, config: 'DictConfig', **kwargs):
        self.hparams = config
        super().__init__()
        self._build_network()

    def _build_network(self):
        self.pi = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
        nn.init.normal_(self.pi.weight)
        self.sigma = nn.Linear(self.hparams.input_dim, self.hparams.
            num_gaussian, bias=self.hparams.sigma_bias_flag)
        self.mu = nn.Linear(self.hparams.input_dim, self.hparams.num_gaussian)
        nn.init.normal_(self.mu.weight)
        if self.hparams.mu_bias_init is not None:
            for i, bias in enumerate(self.hparams.mu_bias_init):
                nn.init.constant_(self.mu.bias[i], bias)

    def gaussian_probability(self, sigma, mu, target, log=False):
        """Returns the probability of `target` given MoG parameters `sigma`
        and `mu`.

        Arguments:
            sigma (BxGxO): The standard deviation of the Gaussians. B is the
                batch size, G is the number of Gaussians, and O is the number
                of dimensions per Gaussian.
            mu (BxGxO): The means of the Gaussians. B is the batch size, G is
                the number of Gaussians, and O is the number of dimensions
                per Gaussian.
            target (BxI): A batch of target. B is the batch size and I is the
                number of input dimensions.

        Returns:
            probabilities (BxG): The probability of each point in the
                probability of the distribution in the corresponding
                sigma/mu index.
        """
        target = target.expand_as(sigma)
        if log:
            ret = -torch.log(sigma) - 0.5 * LOG2PI - 0.5 * torch.pow((
                target - mu) / sigma, 2)
        else:
            ret = ONEOVERSQRT2PI / sigma * torch.exp(-0.5 * ((target - mu) /
                sigma) ** 2)
        return ret

    def log_prob(self, pi, sigma, mu, y):
        log_component_prob = self.gaussian_probability(sigma, mu, y, log=True)
        log_mix_prob = torch.log(nn.functional.gumbel_softmax(pi, tau=self.
            hparams.softmax_temperature, dim=-1) + 1e-15)
        return torch.logsumexp(log_component_prob + log_mix_prob, dim=-1)

    def sample(self, pi, sigma, mu):
        """Draw samples from a MoG."""
        categorical = Categorical(pi)
        pis = categorical.sample().unsqueeze(1)
        sample = Variable(sigma.data.new(sigma.size(0), 1).normal_())
        sample = sample * sigma.gather(1, pis) + mu.gather(1, pis)
        return sample

    def generate_samples(self, pi, sigma, mu, n_samples=None):
        if n_samples is None:
            n_samples = self.hparams.n_samples
        samples = []
        softmax_pi = nn.functional.gumbel_softmax(pi, tau=self.hparams.
            softmax_temperature, dim=-1)
        assert (softmax_pi < 0).sum().item(
            ) == 0, 'pi parameter should not have negative'
        for _ in range(n_samples):
            samples.append(self.sample(softmax_pi, sigma, mu))
        samples = torch.cat(samples, dim=1)
        return samples

    def generate_point_predictions(self, pi, sigma, mu, n_samples=None):
        samples = self.generate_samples(pi, sigma, mu, n_samples)
        if self.hparams.central_tendency == 'mean':
            y_hat = torch.mean(samples, dim=-1)
        elif self.hparams.central_tendency == 'median':
            y_hat = torch.median(samples, dim=-1).values
        return y_hat.unsqueeze(1)

    def forward(self, input_0):
        primals_1 = self.pi.weight
        primals_2 = self.pi.bias
        primals_4 = self.sigma.weight
        primals_5 = self.sigma.bias
        primals_6 = self.mu.weight
        primals_7 = self.mu.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7])
        return output[0], output[1], output[2]
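A small usage sketch for the head above, exercising the `ONEOVERSQRT2PI`/`LOG2PI` definitions added at the top of the module. The `Cfg` stand-in is hypothetical (real callers pass an omegaconf `DictConfig`); this runs the eager class on CPU:

import torch

class Cfg:  # hypothetical stand-in for the DictConfig the library passes
    input_dim = 4
    num_gaussian = 4
    sigma_bias_flag = True
    mu_bias_init = None
    softmax_temperature = 1.0

head = MixtureDensityHead(Cfg())
x = torch.rand(8, 4)
pi, sigma, mu = head(x)  # mixture logits, scales, means
y = torch.rand(8, 1)
nll = -head.log_prob(pi, sigma, mu, y).mean()  # mixture negative log-likelihood
nll.backward()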
robburdon/pytorch_tabular
MixtureDensityHead
false
16,685
[ "MIT" ]
560
9bf75f22c6e1b3033ad699713e77c283d55f3555
https://github.com/robburdon/pytorch_tabular/tree/9bf75f22c6e1b3033ad699713e77c283d55f3555
LayerNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_7/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # ret => add, rsqrt, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[64], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr): xnumel = 64 
xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp4 = tmp2 + tmp3 tmp6 = tmp4 + tmp5 tmp7 = 4.0 tmp8 = tmp6 / tmp7 tmp9 = tmp0 - tmp8 tmp10 = tmp9 * tmp9 tmp11 = tmp1 - tmp8 tmp12 = tmp11 * tmp11 tmp13 = tmp10 + tmp12 tmp14 = tmp3 - tmp8 tmp15 = tmp14 * tmp14 tmp16 = tmp13 + tmp15 tmp17 = tmp5 - tmp8 tmp18 = tmp17 * tmp17 tmp19 = tmp16 + tmp18 tmp20 = tmp19 / tmp7 tmp21 = 1e-05 tmp22 = tmp20 + tmp21 tmp23 = libdevice.rsqrt(tmp22) tl.store(out_ptr0 + (x0), tmp8, xmask) tl.store(out_ptr1 + (x0), tmp23, xmask) ''', device_str='cuda') # kernel path: runs/run_shard_7/inductor_cache/lh/clhh73owbiuj4adasmetdqsot2nlmw2ljupnw2q4yt3du76mikww.py # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] # Source node to ATen node mapping: # ret => add, add_1, mul, mul_1, rsqrt, sub, var_mean # Graph fragment: # %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_1, [3]), kwargs = {correction: 0, keepdim: True}) # %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {}) # %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {}) # %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %getitem_1), kwargs = {}) # %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {}) # %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {}) # %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {}) triton_poi_fused_native_layer_norm_1 = async_compile.triton('triton_poi_fused_native_layer_norm_1', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 
16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) x0 = xindex % 4 tmp0 = tl.load(in_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x2), tmp8, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32) # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] stream0 = get_raw_stream(0) triton_poi_fused_native_layer_norm_0.run(primals_1, buf0, buf1, 64, grid=grid(64), stream=stream0) buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [ret], Original ATen: [aten.native_layer_norm] triton_poi_fused_native_layer_norm_1.run(primals_1, buf0, buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0) del buf0 del buf1 del primals_2 del primals_3 return (buf2, primals_1, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
    xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp7 = 4.0
    tmp8 = tmp6 / tmp7
    tmp9 = tmp0 - tmp8
    tmp10 = tmp9 * tmp9
    tmp11 = tmp1 - tmp8
    tmp12 = tmp11 * tmp11
    tmp13 = tmp10 + tmp12
    tmp14 = tmp3 - tmp8
    tmp15 = tmp14 * tmp14
    tmp16 = tmp13 + tmp15
    tmp17 = tmp5 - tmp8
    tmp18 = tmp17 * tmp17
    tmp19 = tmp16 + tmp18
    tmp20 = tmp19 / tmp7
    tmp21 = 1e-05
    tmp22 = tmp20 + tmp21
    tmp23 = libdevice.rsqrt(tmp22)
    tl.store(out_ptr0 + x0, tmp8, xmask)
    tl.store(out_ptr1 + x0, tmp23, xmask)


@triton.jit
def triton_poi_fused_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
    in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 - tmp1
    tmp4 = tmp2 * tmp3
    tmp6 = tmp4 * tmp5
    tmp8 = tmp6 + tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_native_layer_norm_0[grid(64)](primals_1, buf0,
            buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
        buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        triton_poi_fused_native_layer_norm_1[grid(256)](primals_1, buf0,
            buf1, primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4,
            num_stages=1)
        del buf0
        del buf1
        del primals_2
        del primals_3
    return buf2, primals_1


class LayerNormNew(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, input_0):
        primals_2 = self.weight
        primals_3 = self.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
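The two kernels implement a standard last-dim layer norm (a mean/rsqrt pass, then scale-and-shift), so the compiled module can be checked directly against `F.layer_norm`. A minimal sketch, assuming a CUDA device:

import torch
import torch.nn.functional as F

ln = LayerNormNew(4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
# eps=1e-05 matches the constant baked into the first kernel
ref = F.layer_norm(x, (4,), ln.weight, ln.bias, eps=1e-05)
torch.testing.assert_close(ln(x), ref)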
Artanic30/RentalPrediction
LayerNorm
false
1982
[ "MIT" ]
0
5804ab9b453d2a40bce2bb304c31efc98a803ed8
https://github.com/Artanic30/RentalPrediction/tree/5804ab9b453d2a40bce2bb304c31efc98a803ed8
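A minimal parity sketch for the record above, assuming a CUDA device is available. The compiled kernels assert the traced (4, 4, 4, 4) contiguous shape, so the check only holds for that input; LayerNormNew subclasses nn.LayerNorm, so its affine parameters can be shared with an eager reference.

import torch
from torch import nn

eager = nn.LayerNorm(4).cuda()
compiled = LayerNormNew(4).cuda()
compiled.load_state_dict(eager.state_dict())  # share weight/bias with the eager module
x = torch.rand(4, 4, 4, 4, device='cuda')     # the shape the kernels were specialized for
torch.testing.assert_close(compiled(x), eager(x))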
RingLoss
import torch import warnings import torch.nn as nn from torchvision.transforms import * class RingLoss(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self): super(RingLoss, self).__init__() warnings.warn('This method is deprecated') self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) def forward(self, x): loss = ((x.norm(p=2, dim=1) - self.radius) ** 2).mean() return loss def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import warnings import torch.nn as nn from torchvision.transforms import * assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex % 16 r1 = rindex // 16 r2 = rindex tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None) tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None) tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None) tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None) tmp12 = tl.load(in_ptr1 + 0) tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK]) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = libdevice.sqrt(tmp10) tmp14 = tmp11 - tmp13 tmp15 = 2.0 tmp16 = tmp14 * tmp15 tmp17 = tmp14 * tmp14 tmp18 = tl.broadcast_to(tmp17, [XBLOCK, RBLOCK]) tmp20 = tl.sum(tmp18, 1)[:, None] tmp21 = 64.0 tmp22 = tmp20 / tmp21 tl.store(out_ptr1 + tl.broadcast_to(r2, [XBLOCK, RBLOCK]), tmp16, None) tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp22, None) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_2, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32) buf1 = empty_strided_cuda((), (), torch.float32) buf3 = buf1 del buf1 get_raw_stream(0) triton_per_fused_linalg_vector_norm_mean_mul_pow_sub_0[grid(1)](buf3, primals_1, primals_2, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del primals_1 del primals_2 return buf3, buf2 class RingLossNew(nn.Module): """Ring loss. Reference: Zheng et al. Ring loss: Convex Feature Normalization for Face Recognition. CVPR 2018. """ def __init__(self): super(RingLossNew, self).__init__() warnings.warn('This method is deprecated') self.radius = nn.Parameter(torch.ones(1, dtype=torch.float)) def forward(self, input_0): primals_2 = self.radius primals_1 = input_0 output = call([primals_1, primals_2]) return output[0]
xijiali/ABD_Net
RingLoss
false
4579
[ "MIT" ]
0
8d2d9b316b7c181ce441ceb4b1c62fb9a6d53153
https://github.com/xijiali/ABD_Net/tree/8d2d9b316b7c181ce441ceb4b1c62fb9a6d53153
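As a rough smoke test (again assuming CUDA), the compiled RingLossNew should agree with the eager RingLoss above; both initialize radius to 1, so no parameter copying is needed.

import torch

eager = RingLoss().cuda()        # emits the deprecation warning from __init__
compiled = RingLossNew().cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')  # fixed shape asserted inside call()
torch.testing.assert_close(compiled(x), eager(x))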
FactorTransfer
import torch from torch import nn import torch.nn.functional as F class FactorTransfer(nn.Module): """Paraphrasing Complex Network: Network Compression via Factor Transfer, NeurIPS 2018""" def __init__(self, p1=2, p2=1): super(FactorTransfer, self).__init__() self.p1 = p1 self.p2 = p2 def forward(self, f_s, f_t): return self.factor_loss(f_s, f_t) def factor_loss(self, f_s, f_t): s_H, t_H = f_s.shape[2], f_t.shape[2] if s_H > t_H: f_s = F.adaptive_avg_pool2d(f_s, (t_H, t_H)) elif s_H < t_H: f_t = F.adaptive_avg_pool2d(f_t, (s_H, s_H)) else: pass if self.p2 == 1: return (self.factor(f_s) - self.factor(f_t)).abs().mean() else: return (self.factor(f_s) - self.factor(f_t)).pow(self.p2).mean() def factor(self, f): return F.normalize(f.pow(self.p1).mean(1).view(f.size(0), -1)) def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch import nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_div_linalg_vector_norm_sub_0(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0) tmp2 = tl.load(in_ptr0 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp5 = tl.load(in_ptr0 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp8 = tl.load(in_ptr0 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp18 = tl.load(in_ptr1 + (r1 + 64 * x0), xmask, other=0.0) tmp20 = tl.load(in_ptr1 + (16 + r1 + 64 * x0), xmask, other=0.0) tmp23 = tl.load(in_ptr1 + (32 + r1 + 64 * x0), xmask, other=0.0) tmp26 = tl.load(in_ptr1 + (48 + r1 + 64 * x0), xmask, other=0.0) tmp1 = tmp0 * tmp0 tmp3 = tmp2 * tmp2 tmp4 = tmp1 + tmp3 tmp6 = tmp5 * tmp5 tmp7 = tmp4 + tmp6 tmp9 = tmp8 * tmp8 tmp10 = tmp7 + tmp9 tmp11 = 4.0 tmp12 = tmp10 / tmp11 tmp13 = tmp12 * tmp12 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tmp19 = tmp18 * tmp18 tmp21 = tmp20 * tmp20 tmp22 = tmp19 + tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 + tmp24 tmp27 = tmp26 * tmp26 tmp28 = tmp25 + tmp27 tmp29 = tmp28 / tmp11 tmp30 = tmp29 * tmp29 tmp31 = tl.broadcast_to(tmp30, [XBLOCK, RBLOCK]) tmp33 = tl.where(xmask, tmp31, 0) tmp34 = tl.sum(tmp33, 1)[:, None] tmp35 = libdevice.sqrt(tmp17) tmp36 = 1e-12 tmp37 = triton_helpers.maximum(tmp35, tmp36) tmp38 = tmp12 / tmp37 tmp39 = libdevice.sqrt(tmp34) tmp40 = triton_helpers.maximum(tmp39, tmp36) tmp41 = tmp29 / tmp40 tmp42 = tmp38 - tmp41 tl.store(out_ptr2 + (r1 + 16 * x0), tmp42, xmask) @triton.jit def triton_per_fused_abs_mean_1(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl_math.abs(tmp0) tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK]) tmp4 = tl.sum(tmp2, 1)[:, None] tmp5 = 64.0 tmp6 = tmp4 / tmp5 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf2 = empty_strided_cuda((4, 16), (16, 1), torch.float32) get_raw_stream(0) triton_per_fused_div_linalg_vector_norm_sub_0[grid(4)](arg0_1, arg1_1, buf2, 4, 16, XBLOCK=1, num_warps=2, num_stages=1) del arg0_1 del arg1_1 buf3 = empty_strided_cuda((), (), torch.float32) buf4 = buf3 del buf3 triton_per_fused_abs_mean_1[grid(1)](buf4, buf2, 1, 64, XBLOCK=1, num_warps=2, num_stages=1) del buf2 return buf4, class FactorTransferNew(nn.Module): 
"""Paraphrasing Complex Network: Network Compression via Factor Transfer, NeurIPS 2018""" def __init__(self, p1=2, p2=1): super(FactorTransferNew, self).__init__() self.p1 = p1 self.p2 = p2 def factor_loss(self, f_s, f_t): s_H, t_H = f_s.shape[2], f_t.shape[2] if s_H > t_H: f_s = F.adaptive_avg_pool2d(f_s, (t_H, t_H)) elif s_H < t_H: f_t = F.adaptive_avg_pool2d(f_t, (s_H, s_H)) else: pass if self.p2 == 1: return (self.factor(f_s) - self.factor(f_t)).abs().mean() else: return (self.factor(f_s) - self.factor(f_t)).pow(self.p2).mean() def factor(self, f): return F.normalize(f.pow(self.p1).mean(1).view(f.size(0), -1)) def forward(self, input_0, input_1): arg0_1 = input_0 arg1_1 = input_1 output = call([arg0_1, arg1_1]) return output[0]
kctsiolis/RepDistiller
FactorTransfer
false
3930
[ "BSD-2-Clause" ]
0
ce88f6e53fcf8ef81c5bac2d20ad31628dd279ac
https://github.com/kctsiolis/RepDistiller/tree/ce88f6e53fcf8ef81c5bac2d20ad31628dd279ac
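FactorTransfer carries no parameters, so a parity sketch (assuming CUDA) reduces to feeding both modules the same student/teacher feature maps of the traced shape.

import torch

eager = FactorTransfer()
compiled = FactorTransferNew()
f_s = torch.rand(4, 4, 4, 4, device='cuda')  # student features
f_t = torch.rand(4, 4, 4, 4, device='cuda')  # teacher features
torch.testing.assert_close(compiled(f_s, f_t), eager(f_s, f_t))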
ConvNorm
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_4/inductor_cache/wm/cwmqlbvbics7z7kr4p5f3ne6rdnkbsrcrcxxrr56pqdyjkrxldj7.py # Topologically Sorted Source Nodes: [conv_signal], Original ATen: [aten.convolution] # Source node to ATen node mapping: # conv_signal => convolution # Graph fragment: # %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze, %primals_1, %primals_2, [1], [0], [1], False, [0], 1), kwargs = {}) triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[16], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = (xindex // 4) tmp0 = tl.load(in_out_ptr0 + (x2), xmask) tmp1 = tl.load(in_ptr0 + (x1), xmask, 
eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + (x2), tmp2, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) # Topologically Sorted Source Nodes: [conv_signal], Original ATen: [aten.convolution] buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = buf0; del buf0 # reuse # Topologically Sorted Source Nodes: [conv_signal], Original ATen: [aten.convolution] stream0 = get_raw_stream(0) triton_poi_fused_convolution_0.run(buf1, primals_2, 16, grid=grid(16), stream=stream0) del primals_2 return (reinterpret_tensor(buf1, (4, 4), (4, 1), 0), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl .constexpr): xnumel = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_out_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tl.store(in_out_ptr0 + x2, tmp2, xmask) def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4, 1), (4, 1, 1)) assert_size_stride(primals_2, (4,), (1,)) assert_size_stride(primals_3, (4, 4), (4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None) assert_size_stride(buf0, (1, 4, 4), (16, 4, 1)) buf1 = buf0 del buf0 get_raw_stream(0) triton_poi_fused_convolution_0[grid(16)](buf1, primals_2, 16, XBLOCK=16, num_warps=1, num_stages=1) del primals_2 return reinterpret_tensor(buf1, (4, 4), (4, 1), 0 ), primals_1, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0) class ConvNormNew(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=None, dilation=1, bias=True, w_init_gain='linear'): super(ConvNormNew, self).__init__() if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size= kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias) torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init. calculate_gain(w_init_gain)) def forward(self, input_0): primals_1 = self.conv.weight primals_2 = self.conv.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3]) return output[0]
CODEJIN/TacoSinger
ConvNorm
false
4934
[ "MIT" ]
1
af58a8f4e8b20e8817990f28a3ba22168c853655
https://github.com/CODEJIN/TacoSinger/tree/af58a8f4e8b20e8817990f28a3ba22168c853655
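This record's original column is only the raw AOT dump, so no eager ConvNorm class appears here; the compiled module can still be checked against its own nn.Conv1d, since call() merely views the (4, 4) input as a batch of one (a sketch assuming CUDA).

import torch

conv_norm = ConvNormNew(4, 4).cuda()   # default kernel_size=1 matches the (4, 4, 1) weight asserted above
x = torch.rand(4, 4, device='cuda')    # (channels, time), viewed as (1, 4, 4) inside call()
ref = conv_norm.conv(x.unsqueeze(0)).squeeze(0)  # eager path through the same Conv1d
torch.testing.assert_close(conv_norm(x), ref)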
MeanPooling
import torch from torch import nn class MeanPooling(nn.Module): def __init__(self): super(MeanPooling, self).__init__() def forward(self, doc_state, entity_mapping, entity_lens): entity_states = entity_mapping.unsqueeze(3) * doc_state.unsqueeze(1) mean_pooled = torch.sum(entity_states, dim=2) / entity_lens.unsqueeze(2 ) return mean_pooled def get_inputs(): return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand( [4, 4, 4, 4])] def get_init_inputs(): return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch import nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % 4 x4 = xindex // 16 x3 = xindex // 64 x5 = xindex % 16 x6 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp1 = tl.load(in_ptr1 + (x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp3 = tl.load(in_ptr0 + (4 + x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp4 = tl.load(in_ptr1 + (16 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp7 = tl.load(in_ptr0 + (8 + x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp8 = tl.load(in_ptr1 + (32 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp11 = tl.load(in_ptr0 + (12 + x0 + 16 * x4), xmask, eviction_policy= 'evict_last') tmp12 = tl.load(in_ptr1 + (48 + x5 + 64 * x3), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 * tmp1 tmp5 = tmp3 * tmp4 tmp6 = tmp2 + tmp5 tmp9 = tmp7 * tmp8 tmp10 = tmp6 + tmp9 tmp13 = tmp11 * tmp12 tmp14 = tmp10 + tmp13 tl.store(out_ptr0 + x6, tmp14, xmask) @triton.jit def triton_poi_fused_div_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr): xnumel = 1024 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x4 = xindex % 256 x0 = xindex % 16 x5 = xindex // 64 x6 = xindex tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last') tmp1 = tl.load(in_ptr1 + (x0 + 16 * x5), xmask, eviction_policy= 'evict_last') tmp2 = tmp0 / tmp1 tl.store(out_ptr0 + x6, tmp2, xmask) def call(args): arg0_1, arg1_1, arg2_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused_mul_sum_0[grid(256)](arg0_1, arg1_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1) del arg0_1 del arg1_1 buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32) triton_poi_fused_div_mul_sum_1[grid(1024)](buf0, arg2_1, buf1, 1024, XBLOCK=128, num_warps=4, num_stages=1) del arg2_1 del buf0 return buf1, class MeanPoolingNew(nn.Module): def __init__(self): super(MeanPoolingNew, self).__init__() def forward(self, input_0, input_1, input_2): arg0_1 = input_0 arg1_1 = input_1 arg2_1 = input_2 output = call([arg0_1, arg1_1, arg2_1]) return output[0]
jennybae1024/DFGN-pytorch
MeanPooling
false
15683
[ "MIT" ]
191
056d9317f772cd10bdd215bfafdbac5cbd330026
https://github.com/jennybae1024/DFGN-pytorch/tree/056d9317f772cd10bdd215bfafdbac5cbd330026
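A parity sketch for the pooling record (assuming CUDA). Note that with a 4-D entity_lens, the trailing division broadcasts to the 5-D (4, 4, 4, 4, 4) result seen in buf1 above; both paths produce the same broadcast shape.

import torch

eager = MeanPooling()
compiled = MeanPoolingNew()
doc_state, entity_mapping, entity_lens = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
torch.testing.assert_close(compiled(doc_state, entity_mapping, entity_lens),
                           eager(doc_state, entity_mapping, entity_lens))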
EncoderImagePrecomp
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_8/inductor_cache/7y/c7yvmseinx6mtn7syc332l4dh5naxbr76mdf6jwiyvy5l3xzedwc.py # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div] # Source node to ATen node mapping: # X => div # norm => sqrt # pow_1 => pow_1 # sum_1 => sum_1 # Graph fragment: # %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%view_1, 2), kwargs = {}) # %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {}) # %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%sum_1,), kwargs = {}) # %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_1, %sqrt), kwargs = {}) triton_poi_fused_div_pow_sqrt_sum_0 = async_compile.triton('triton_poi_fused_div_pow_sqrt_sum_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[256], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_pow_sqrt_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, 
min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x3 = xindex x0 = xindex % 16 x2 = (xindex // 64) tmp0 = tl.load(in_ptr0 + (x3), xmask) tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last') tmp2 = tmp1 * tmp1 tmp4 = tmp3 * tmp3 tmp5 = tmp2 + tmp4 tmp7 = tmp6 * tmp6 tmp8 = tmp5 + tmp7 tmp10 = tmp9 * tmp9 tmp11 = tmp8 + tmp10 tmp12 = libdevice.sqrt(tmp11) tmp13 = tmp0 / tmp12 tl.store(out_ptr0 + (x3), tmp13, xmask) ''', device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [features], Original ATen: [aten.addmm] extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0) del primals_1 del primals_2 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) # Topologically Sorted Source Nodes: [pow_1, sum_1, norm, X], Original ATen: [aten.pow, aten.sum, aten.sqrt, aten.div] stream0 = get_raw_stream(0) triton_poi_fused_div_pow_sqrt_sum_0.run(buf0, buf1, 256, grid=grid(256), stream=stream0) return (buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_div_pow_sqrt_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
    tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x0 = xindex % 16
    x2 = xindex // 64
    tmp0 = tl.load(in_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
        'evict_last')
    tmp2 = tmp1 * tmp1
    tmp4 = tmp3 * tmp3
    tmp5 = tmp2 + tmp4
    tmp7 = tmp6 * tmp6
    tmp8 = tmp5 + tmp7
    tmp10 = tmp9 * tmp9
    tmp11 = tmp8 + tmp10
    tmp12 = libdevice.sqrt(tmp11)
    tmp13 = tmp0 / tmp12
    tl.store(out_ptr0 + x3, tmp13, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (64,
            4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
            ), alpha=1, beta=1, out=buf0)
        del primals_1
        del primals_2
        buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_div_pow_sqrt_sum_0[grid(256)](buf0, buf1, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
    return buf1, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf0


def l2norm(X):
    """L2-normalize columns of X
    """
    norm = torch.pow(X, 2).sum(dim=1, keepdim=True).sqrt()
    X = torch.div(X, norm)
    return X


class EncoderImagePrecompNew(nn.Module):

    def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
        super(EncoderImagePrecompNew, self).__init__()
        self.embed_size = embed_size
        self.no_imgnorm = no_imgnorm
        self.use_abs = use_abs
        self.fc = nn.Linear(img_dim, embed_size)
        self.init_weights()

    def init_weights(self):
        """Xavier initialization for the fully connected layer
        """
        r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
        self.fc.weight.data.uniform_(-r, r)
        self.fc.bias.data.fill_(0)

    def load_state_dict(self, state_dict):
        """Copies parameters, overwriting the default implementation to
        accept a state_dict from the full model.
        """
        own_state = self.state_dict()
        new_state = OrderedDict()
        for name, param in state_dict.items():
            if name in own_state:
                new_state[name] = param
        super(EncoderImagePrecompNew, self).load_state_dict(new_state)

    def forward(self, input_0):
        primals_1 = self.fc.weight
        primals_2 = self.fc.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
ascott02/vsepp
EncoderImagePrecomp
false
9736
[ "Apache-2.0" ]
0
c09abd2be5f1fec237ccfe3d7f41bfdea2acfde2
https://github.com/ascott02/vsepp/tree/c09abd2be5f1fec237ccfe3d7f41bfdea2acfde2
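The eager encoder class is not reproduced in this record, but the optimised column keeps l2norm and the same fc layer, so a self-contained check (assuming CUDA) is still possible.

import torch

enc = EncoderImagePrecompNew(img_dim=4, embed_size=4).cuda()
images = torch.rand(4, 4, 4, 4, device='cuda')
ref = l2norm(enc.fc(images))   # eager path: linear projection, then L2 norm over dim 1
torch.testing.assert_close(enc(images), ref)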
layer_normalization
import torch
import torch.nn as nn


class layer_normalization(nn.Module):

    def __init__(self, features, epsilon=1e-08):
        """Applies layer normalization.

        Args:
            epsilon: A floating-point number. A very small number for
                preventing ZeroDivisionError.
        """
        super(layer_normalization, self).__init__()
        self.epsilon = epsilon
        self.gamma = nn.Parameter(torch.ones(features))
        self.beta = nn.Parameter(torch.zeros(features))

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.gamma * (x - mean) / (std + self.epsilon) + self.beta


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'features': 4}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_0(in_ptr0, in_ptr1, in_ptr2,
    out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 4
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask)
    tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
    tmp4 = tmp2 + tmp3
    tmp6 = tmp4 + tmp5
    tmp8 = tmp6 + tmp7
    tmp9 = 4.0
    tmp10 = tmp8 / tmp9
    tmp11 = tmp1 - tmp10
    tmp12 = tmp0 * tmp11
    tmp13 = tmp2 - tmp10
    tmp14 = tmp13 * tmp13
    tmp15 = tmp3 - tmp10
    tmp16 = tmp15 * tmp15
    tmp17 = tmp14 + tmp16
    tmp18 = tmp5 - tmp10
    tmp19 = tmp18 * tmp18
    tmp20 = tmp17 + tmp19
    tmp21 = tmp7 - tmp10
    tmp22 = tmp21 * tmp21
    tmp23 = tmp20 + tmp22
    tmp24 = 3.0
    tmp25 = tmp23 / tmp24
    tmp26 = libdevice.sqrt(tmp25)
    tmp27 = 1e-08
    tmp28 = tmp26 + tmp27
    tmp29 = tmp12 / tmp28
    tmp31 = tmp29 + tmp30
    tl.store(out_ptr0 + x2, tmp31, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_div_mean_mul_std_sub_0[grid(256)](primals_2,
            primals_1, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
            num_stages=1)
        del primals_2
        del primals_3
    return buf0, primals_1


class layer_normalizationNew(nn.Module):

    def __init__(self, features, epsilon=1e-08):
        """Applies layer normalization.

        Args:
            epsilon: A floating-point number. A very small number for
                preventing ZeroDivisionError.
        """
        super(layer_normalizationNew, self).__init__()
        self.epsilon = epsilon
        self.gamma = nn.Parameter(torch.ones(features))
        self.beta = nn.Parameter(torch.zeros(features))

    def forward(self, input_0):
        primals_2 = self.gamma
        primals_3 = self.beta
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
Woodytse/transformer
layer_normalization
false
11968
[ "MIT" ]
0
56f7c3051765e8cb3c34d2e9a41d483cec162256
https://github.com/Woodytse/transformer/tree/56f7c3051765e8cb3c34d2e9a41d483cec162256
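A parity sketch (assuming CUDA). The fused kernel reproduces the eager module's unbiased std (note the divisor 3.0 for a feature size of 4) and adds epsilon to the std rather than the variance, so the two paths should agree within float tolerance.

import torch

eager = layer_normalization(4).cuda()
compiled = layer_normalizationNew(4).cuda()  # gamma/beta initialize identically in both
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x))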
Generator
import torch import torch.nn as nn import torch.nn.functional as F class Generator(nn.Module): """Define standard linear + softmax generation step.""" def __init__(self, hidden_size, vocab_size): super(Generator, self).__init__() self.proj = nn.Linear(hidden_size, vocab_size, bias=False) def forward(self, x): return F.log_softmax(self.proj(x), dim=-1) def get_inputs(): return [torch.rand([4, 4, 4, 4])] def get_init_inputs(): return [[], {'hidden_size': 4, 'vocab_size': 4}]
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import math as tl_math import torch.nn as nn assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp3 = triton_helpers.maximum(tmp1, tmp2) tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp7 = triton_helpers.maximum(tmp5, tmp6) tmp8 = tmp0 - tmp7 tl.store(out_ptr0 + x2, tmp8, xmask) @triton.jit def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl. constexpr): xnumel = 256 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x2 = xindex x1 = xindex // 4 tmp0 = tl.load(in_ptr0 + x2, xmask) tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last') tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last') tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last') tmp2 = tl_math.exp(tmp1) tmp4 = tl_math.exp(tmp3) tmp5 = tmp2 + tmp4 tmp7 = tl_math.exp(tmp6) tmp8 = tmp5 + tmp7 tmp10 = tl_math.exp(tmp9) tmp11 = tmp8 + tmp10 tmp12 = tl_math.log(tmp11) tmp13 = tmp0 - tmp12 tl.store(out_ptr0 + x2, tmp13, xmask) def call(args): primals_1, primals_2 = args args.clear() assert_size_stride(primals_1, (4, 4), (4, 1)) assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0) del primals_1 buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32) get_raw_stream(0) triton_poi_fused__log_softmax_0[grid(256)](buf0, buf1, 256, XBLOCK= 256, num_warps=4, num_stages=1) buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0) del buf0 triton_poi_fused__log_softmax_1[grid(256)](buf1, buf2, 256, XBLOCK= 256, num_warps=4, num_stages=1) del buf1 return buf2, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf2 class GeneratorNew(nn.Module): """Define standard linear + softmax generation step.""" def __init__(self, hidden_size, vocab_size): super(GeneratorNew, self).__init__() self.proj = nn.Linear(hidden_size, vocab_size, bias=False) def forward(self, input_0): primals_1 = self.proj.weight primals_2 = input_0 output = call([primals_1, primals_2]) return output[0]
Yixuan-Lee/yixuan-lee.github.io
Generator
false
2983
[ "MIT" ]
0
139dd141544302ca1802a6104f7db7aeb1ace825
https://github.com/Yixuan-Lee/yixuan-lee.github.io/tree/139dd141544302ca1802a6104f7db7aeb1ace825
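For the generator record, the bias-free projection weight is the only state, so parity (assuming CUDA) just needs the weights shared before comparing the log-softmax outputs.

import torch

eager = Generator(hidden_size=4, vocab_size=4).cuda()
compiled = GeneratorNew(hidden_size=4, vocab_size=4).cuda()
compiled.load_state_dict(eager.state_dict())  # copy proj.weight
x = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(x), eager(x))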
BCEDiceLossWithLogits
import torch
import torch.nn as nn
import torch.utils.data


def flatten_samples(input_):
    """
    Flattens a tensor or a variable such that the channel axis is first and
    the sample axis is second. The shapes are transformed as follows:
        (N, C, H, W) --> (C, N * H * W)
        (N, C, D, H, W) --> (C, N * D * H * W)
        (N, C) --> (C, N)
    The input must be at least 2d.
    """
    num_channels = input_.size(1)
    permute_axes = list(range(input_.dim()))
    permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0]
    permuted = input_.permute(*permute_axes).contiguous()
    flattened = permuted.view(num_channels, -1)
    return flattened


def dice_score(input_, target, invert=False, channelwise=True, eps=1e-07):
    if channelwise:
        input_ = flatten_samples(input_)
        target = flatten_samples(target)
        numerator = (input_ * target).sum(-1)
        denominator = (input_ * input_).sum(-1) + (target * target).sum(-1)
        channelwise_score = 2 * (numerator / denominator.clamp(min=eps))
        if invert:
            channelwise_score = 1.0 - channelwise_score
        score = channelwise_score.sum()
    else:
        numerator = (input_ * target).sum()
        denominator = (input_ * input_).sum() + (target * target).sum()
        score = 2.0 * (numerator / denominator.clamp(min=eps))
        if invert:
            score = 1.0 - score
    return score


class BCEDiceLossWithLogits(nn.Module):

    def __init__(self, alpha=1.0, beta=1.0, channelwise=True, eps=1e-07):
        super().__init__()
        self.alpha = alpha
        self.beta = beta
        self.channelwise = channelwise
        self.eps = eps
        self.init_kwargs = {'alpha': alpha, 'beta': beta, 'channelwise':
            channelwise, 'eps': self.eps}

    def forward(self, input_, target):
        loss_dice = dice_score(nn.functional.sigmoid(input_), target,
            invert=True, channelwise=self.channelwise, eps=self.eps)
        loss_bce = nn.functional.binary_cross_entropy_with_logits(input_,
            target)
        return self.alpha * loss_dice + self.beta * loss_bce


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime import triton_helpers from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math import torch.nn as nn import torch.utils.data assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda @triton.jit def triton_per_fused_mul_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr): xnumel = 4 RBLOCK: tl.constexpr = 64 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask, other=0.0) tmp2 = tl.load(in_ptr1 + (16 * x0 + 64 * (r1 // 16) + r1 % 16), xmask, other=0.0) tmp1 = tl.sigmoid(tmp0) tmp3 = tmp1 * tmp2 tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK]) tmp6 = tl.where(xmask, tmp4, 0) tmp7 = tl.sum(tmp6, 1)[:, None] tmp8 = tmp1 * tmp1 tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK]) tmp11 = tl.where(xmask, tmp9, 0) tmp12 = tl.sum(tmp11, 1)[:, None] tmp13 = tmp2 * tmp2 tmp14 = tl.broadcast_to(tmp13, [XBLOCK, RBLOCK]) tmp16 = tl.where(xmask, tmp14, 0) tmp17 = tl.sum(tmp16, 1)[:, None] tl.store(out_ptr0 + x0, tmp7, xmask) tl.store(out_ptr1 + x0, tmp12, xmask) tl.store(out_ptr2 + x0, tmp17, xmask) @triton.jit def triton_per_fused_add_clamp_div_mul_rsub_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr): RBLOCK: tl.constexpr = 4 xoffset = tl.program_id(0) * XBLOCK xoffset + tl.arange(0, XBLOCK)[:, None] tl.full([XBLOCK, RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[None, :] tl.full([XBLOCK, RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp1 = tl.load(in_ptr1 + r0, None) tmp2 = tl.load(in_ptr2 + r0, None) tmp3 = tmp1 + tmp2 tmp4 = 1e-07 tmp5 = triton_helpers.maximum(tmp3, tmp4) tmp6 = tmp0 / tmp5 tmp7 = 2.0 tmp8 = tmp6 * tmp7 tmp9 = 1.0 tmp10 = tmp9 - tmp8 tmp11 = tl.broadcast_to(tmp10, [XBLOCK, RBLOCK]) tmp13 = tl.sum(tmp11, 1)[:, None] tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp13, None) @triton.jit def triton_per_fused_add_binary_cross_entropy_with_logits_mul_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel): XBLOCK: tl.constexpr = 1 RBLOCK: tl.constexpr = 256 xoffset = tl.program_id(0) * XBLOCK tl.full([1], xoffset, tl.int32) tl.full([RBLOCK], True, tl.int1) rindex = tl.arange(0, RBLOCK)[:] tl.full([RBLOCK], True, tl.int1) r0 = rindex tmp0 = tl.load(in_ptr0 + r0, None) tmp3 = tl.load(in_ptr1 + r0, None) tmp16 = tl.load(in_out_ptr0 + 0) tmp17 = tl.broadcast_to(tmp16, [1]) tmp1 = 1.0 tmp2 = tmp1 - tmp0 tmp4 = tmp2 * tmp3 tmp5 = 0.0 tmp6 = triton_helpers.minimum(tmp5, tmp3) tmp7 = tl_math.abs(tmp3) tmp8 = -tmp7 tmp9 = tl_math.exp(tmp8) tmp10 = libdevice.log1p(tmp9) tmp11 = tmp6 - tmp10 tmp12 = tmp4 - tmp11 tmp13 = tl.broadcast_to(tmp12, [RBLOCK]) tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0)) tmp18 = tmp17 * tmp1 tmp19 = 256.0 tmp20 = tmp15 / tmp19 tmp21 = tmp20 * tmp1 tmp22 = tmp18 + tmp21 tl.debug_barrier() tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None) def call(args): arg0_1, arg1_1 = args args.clear() assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1)) with 
torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4,), (1,), torch.float32)
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        buf2 = empty_strided_cuda((4,), (1,), torch.float32)
        get_raw_stream(0)
        triton_per_fused_mul_sum_0[grid(4)](arg0_1, arg1_1, buf0, buf1,
            buf2, 4, 64, XBLOCK=1, num_warps=2, num_stages=1)
        buf3 = empty_strided_cuda((), (), torch.float32)
        triton_per_fused_add_clamp_div_mul_rsub_sum_1[grid(1)](buf0, buf1,
            buf2, buf3, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
        del buf0
        del buf1
        del buf2
        buf5 = buf3
        del buf3
        triton_per_fused_add_binary_cross_entropy_with_logits_mul_2[grid(1)](
            buf5, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
        del arg0_1
        del arg1_1
    return buf5,


def flatten_samples(input_):
    """
    Flattens a tensor or a variable such that the channel axis is first and
    the sample axis is second. The shapes are transformed as follows:
        (N, C, H, W) --> (C, N * H * W)
        (N, C, D, H, W) --> (C, N * D * H * W)
        (N, C) --> (C, N)
    The input must be at least 2d.
    """
    num_channels = input_.size(1)
    permute_axes = list(range(input_.dim()))
    permute_axes[0], permute_axes[1] = permute_axes[1], permute_axes[0]
    permuted = input_.permute(*permute_axes).contiguous()
    flattened = permuted.view(num_channels, -1)
    return flattened


def dice_score(input_, target, invert=False, channelwise=True, eps=1e-07):
    if channelwise:
        input_ = flatten_samples(input_)
        target = flatten_samples(target)
        numerator = (input_ * target).sum(-1)
        denominator = (input_ * input_).sum(-1) + (target * target).sum(-1)
        channelwise_score = 2 * (numerator / denominator.clamp(min=eps))
        if invert:
            channelwise_score = 1.0 - channelwise_score
        score = channelwise_score.sum()
    else:
        numerator = (input_ * target).sum()
        denominator = (input_ * input_).sum() + (target * target).sum()
        score = 2.0 * (numerator / denominator.clamp(min=eps))
        if invert:
            score = 1.0 - score
    return score


class BCEDiceLossWithLogitsNew(nn.Module):

    def __init__(self, alpha=1.0, beta=1.0, channelwise=True, eps=1e-07):
        super().__init__()
        self.alpha = alpha
        self.beta = beta
        self.channelwise = channelwise
        self.eps = eps
        self.init_kwargs = {'alpha': alpha, 'beta': beta, 'channelwise':
            channelwise, 'eps': self.eps}

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
JoOkuma/torch-em
BCEDiceLossWithLogits
false
667
[ "MIT" ]
0
68b723683f9013723a0e4fc8cfef1d6a2a9c9dff
https://github.com/JoOkuma/torch-em/tree/68b723683f9013723a0e4fc8cfef1d6a2a9c9dff
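Both loss modules are parameter-free, so a parity sketch (assuming CUDA) on the traced shapes suffices; targets are kept in [0, 1] as binary cross-entropy expects.

import torch

eager = BCEDiceLossWithLogits()
compiled = BCEDiceLossWithLogitsNew()
logits = torch.randn(4, 4, 4, 4, device='cuda').contiguous()
target = torch.rand(4, 4, 4, 4, device='cuda')
torch.testing.assert_close(compiled(logits, target), eager(logits, target))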
Net
# AOT ID: ['0_forward'] from ctypes import c_void_p, c_long, c_int import torch import math import random import os import tempfile from math import inf, nan from torch._inductor.hooks import run_intermediate_hooks from torch._inductor.utils import maybe_profile from torch._inductor.codegen.memory_planning import _align as align from torch import device, empty_strided from torch._inductor.async_compile import AsyncCompile from torch._inductor.select_algorithm import extern_kernels from torch._inductor.codegen.multi_kernel import MultiKernelCall import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph from torch._C import _cuda_getCurrentRawStream as get_raw_stream aten = torch.ops.aten inductor_ops = torch.ops.inductor _quantized = torch.ops._quantized assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor alloc_from_pool = torch.ops.inductor._alloc_from_pool async_compile = AsyncCompile() # kernel path: runs/run_shard_6/inductor_cache/hr/chrb4easzia4dw753qymwrkwdvp5554y3k2pc6zt3dtjbxgoihzj.py # Topologically Sorted Source Nodes: [pi1], Original ATen: [aten.tanh] # Source node to ATen node mapping: # pi1 => tanh # Graph fragment: # %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%view_1,), kwargs = {}) triton_poi_fused_tanh_0 = async_compile.triton('triton_poi_fused_tanh_0', ''' import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[8192], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 8192 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + (x2), None) tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + (x2), tmp3, None) ''', 
device_str='cuda') async_compile.wait(globals()) del async_compile def call(args): primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128, ), (1, )) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4, ), (1, )) assert_size_stride(primals_6, (128, 4), (4, 1)) assert_size_stride(primals_7, (128, ), (1, )) assert_size_stride(primals_8, (1, 128), (128, 1)) assert_size_stride(primals_9, (1, ), (1, )) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse # Topologically Sorted Source Nodes: [pi1], Original ATen: [aten.tanh] stream0 = get_raw_stream(0) triton_poi_fused_tanh_0.run(buf1, primals_2, 8192, grid=grid(8192), stream=stream0) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) # Topologically Sorted Source Nodes: [logits], Original ATen: [aten.addmm] extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 128), (128, 1), torch.float32) # Topologically Sorted Source Nodes: [], Original ATen: [] extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 128), (1, 4), 0), out=buf3) del primals_6 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf3 # reuse # Topologically Sorted Source Nodes: [v1], Original ATen: [aten.tanh] triton_poi_fused_tanh_0.run(buf4, primals_7, 8192, grid=grid(8192), stream=stream0) del primals_7 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) # Topologically Sorted Source Nodes: [values], Original ATen: [aten.addmm] extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (64, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_9 return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf1, buf4, primals_8, primals_4, ) def benchmark_compiled_module(times=10, repeat=10): from torch._dynamo.testing import rand_strided from torch._inductor.utils import print_performance primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32) primals_4 = rand_strided((4, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32) primals_6 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32) primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32) primals_8 = rand_strided((1, 128), (128, 1), device='cuda:0', dtype=torch.float32) primals_9 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32) fn = lambda: call([primals_1, primals_2, primals_3, primals_4, 
primals_5, primals_6, primals_7, primals_8, primals_9]) return print_performance(fn, times=times, repeat=repeat) if __name__ == "__main__": from torch._inductor.wrapper_benchmark import compiled_module_main compiled_module_main('None', benchmark_compiled_module)
import torch from torch._inductor.select_algorithm import extern_kernels import triton import triton.language as tl from torch._inductor.runtime.triton_heuristics import grid from torch._C import _cuda_getCurrentRawStream as get_raw_stream from torch._inductor.runtime.triton_helpers import libdevice import torch.nn as nn import torch.nn.functional as F assert_size_stride = torch._C._dynamo.guards.assert_size_stride empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor @triton.jit def triton_poi_fused_tanh_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr ): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] tl.full([XBLOCK], True, tl.int1) x2 = xindex x0 = xindex % 128 tmp0 = tl.load(in_out_ptr0 + x2, None) tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last') tmp2 = tmp0 + tmp1 tmp3 = libdevice.tanh(tmp2) tl.store(in_out_ptr0 + x2, tmp3, None) def call(args): (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9) = args args.clear() assert_size_stride(primals_1, (128, 4), (4, 1)) assert_size_stride(primals_2, (128,), (1,)) assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1)) assert_size_stride(primals_4, (4, 128), (128, 1)) assert_size_stride(primals_5, (4,), (1,)) assert_size_stride(primals_6, (128, 4), (4, 1)) assert_size_stride(primals_7, (128,), (1,)) assert_size_stride(primals_8, (1, 128), (128, 1)) assert_size_stride(primals_9, (1,), (1,)) with torch.cuda._DeviceGuard(0): torch.cuda.set_device(0) buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0) del primals_1 buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf0 get_raw_stream(0) triton_poi_fused_tanh_0[grid(8192)](buf1, primals_2, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_2 buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32) extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 4), (1, 128), 0), alpha=1, beta=1, out=buf2) del primals_5 buf3 = empty_strided_cuda((64, 128), (128, 1), torch.float32) extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 128), (1, 4), 0), out=buf3) del primals_6 buf4 = reinterpret_tensor(buf3, (4, 4, 4, 128), (2048, 512, 128, 1), 0) del buf3 triton_poi_fused_tanh_0[grid(8192)](buf4, primals_7, 8192, XBLOCK= 128, num_warps=4, num_stages=1) del primals_7 buf6 = empty_strided_cuda((64, 1), (1, 1), torch.float32) extern_kernels.addmm(primals_9, reinterpret_tensor(buf4, (64, 128), (128, 1), 0), reinterpret_tensor(primals_8, (128, 1), (1, 128), 0), alpha=1, beta=1, out=buf6) del primals_9 return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0 ), reinterpret_tensor(buf6, (4, 4, 4, 1), (16, 4, 1, 1), 0 ), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0 ), buf1, buf4, primals_8, primals_4 def set_init(layers): for layer in layers: nn.init.normal_(layer.weight, mean=0.0, std=0.1) nn.init.constant_(layer.bias, 0.0) class NetNew(nn.Module): def __init__(self, s_dim, a_dim): super(NetNew, self).__init__() self.s_dim = s_dim self.a_dim = a_dim self.pi1 = nn.Linear(s_dim, 128) self.pi2 = nn.Linear(128, a_dim) self.v1 = nn.Linear(s_dim, 128) self.v2 = nn.Linear(128, 1) set_init([self.pi1, self.pi2, self.v1, self.v2]) self.distribution = 
torch.distributions.Categorical def choose_action(self, s): self.eval() logits, _ = self.forward(s) prob = F.softmax(logits, dim=1).data m = self.distribution(prob) return m.sample().numpy()[0] def loss_func(self, s, a, v_t): self.train() logits, values = self.forward(s) td = v_t - values c_loss = td.pow(2) probs = F.softmax(logits, dim=1) m = self.distribution(probs) exp_v = m.log_prob(a) * td.detach().squeeze() a_loss = -exp_v total_loss = (c_loss + a_loss).mean() return total_loss def forward(self, input_0): primals_1 = self.pi1.weight primals_2 = self.pi1.bias primals_4 = self.pi2.weight primals_5 = self.pi2.bias primals_6 = self.v1.weight primals_7 = self.v1.bias primals_8 = self.v2.weight primals_9 = self.v2.bias primals_3 = input_0 output = call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9]) return output[0], output[1]
SeungyounShin/pytorch-A3C
Net
false
1040
[ "MIT" ]
0
acb9c05a5e1a697c48a7d4c1a48b1c86326faf91
https://github.com/SeungyounShin/pytorch-A3C/tree/acb9c05a5e1a697c48a7d4c1a48b1c86326faf91
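A minimal smoke test for the record above; it assumes a CUDA device (the generated call path allocates CUDA buffers) and the shapes baked into the size/stride guards (s_dim=4, a_dim=4, input of shape (4, 4, 4, 4)):

import torch

net = NetNew(s_dim=4, a_dim=4).cuda()  # NetNew as defined in the optimised code above
x = torch.rand(4, 4, 4, 4, device='cuda')
logits, values = net(x)
# Output shapes follow the reinterpret_tensor calls at the end of call().
assert logits.shape == (4, 4, 4, 4)
assert values.shape == (4, 4, 4, 1)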
decoder3
import torch
import torch.nn as nn


class decoder3(nn.Module):

    def __init__(self):
        super(decoder3, self).__init__()
        self.reflecPad7 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv7 = nn.Conv2d(256, 128, 3, 1, 0)
        self.relu7 = nn.ReLU(inplace=True)
        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad8 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv8 = nn.Conv2d(128, 128, 3, 1, 0)
        self.relu8 = nn.ReLU(inplace=True)
        self.reflecPad9 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv9 = nn.Conv2d(128, 64, 3, 1, 0)
        self.relu9 = nn.ReLU(inplace=True)
        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad10 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv10 = nn.Conv2d(64, 64, 3, 1, 0)
        self.relu10 = nn.ReLU(inplace=True)
        self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv11 = nn.Conv2d(64, 3, 3, 1, 0)

    def forward(self, x):
        out = self.reflecPad7(x)
        out = self.conv7(out)
        out = self.relu7(out)
        out = self.unpool(out)
        out = self.reflecPad8(out)
        out = self.conv8(out)
        out = self.relu8(out)
        out = self.reflecPad9(out)
        out = self.conv9(out)
        out_relu9 = self.relu9(out)
        out = self.unpool2(out_relu9)
        out = self.reflecPad10(out)
        out = self.conv10(out)
        out = self.relu10(out)
        out = self.reflecPad11(out)
        out = self.conv11(out)
        return out


def get_inputs():
    return [torch.rand([4, 256, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 6
    x1 = xindex // 6 % 6
    x2 = xindex // 36
    x3 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 + x0)) +
        -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2), None,
        eviction_policy='evict_last')
    tl.store(out_ptr0 + x3, tmp0, None)


@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 8
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp4, xmask)


@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2(in_ptr0,
        in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x1 = xindex // 10 % 10
    x0 = xindex % 10
    x4 = xindex // 100
    x2 = xindex // 100 % 128
    x7 = xindex
    tmp0 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x1))),
        None, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (7 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0))),
        None, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr1 + (tmp8 + 4 * tmp4 + 16 * x4), None,
        eviction_policy='evict_last')
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full([1], 0, tl.int32)
    tmp13 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + x7, tmp13, None)


@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_3(in_ptr0, in_ptr1,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x0 = xindex % 10
    x1 = xindex // 10 % 10
    x4 = xindex // 100
    x2 = xindex // 100 % 128
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-7 + tl_math.abs(-1 + x0)) +
        -8 * tl_math.abs(-7 + tl_math.abs(-1 + x1)) + 64 * x4), None,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(out_ptr0 + x5, tmp4, None)


@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_4(out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = x0
    tmp1 = tmp0.to(tl.float32)
    tmp2 = 0.5
    tmp3 = tmp1 * tmp2
    tmp4 = tmp3.to(tl.int32)
    tl.store(out_ptr0 + x0, tmp4, xmask)


@triton.jit
def triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5(in_ptr0,
        in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 82944
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 18 % 18
    x0 = xindex % 18
    x4 = xindex // 324
    x2 = xindex // 324 % 64
    x7 = xindex
    tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x1))),
        xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0))),
        xmask, eviction_policy='evict_last')
    tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 8, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tmp6 = tmp5 + tmp1
    tmp7 = tmp5 < 0
    tmp8 = tl.where(tmp7, tmp6, tmp5)
    tmp9 = tl.load(in_ptr1 + (tmp8 + 8 * tmp4 + 64 * x4), xmask,
        eviction_policy='evict_last')
    tmp11 = tmp9 + tmp10
    tmp12 = tl.full([1], 0, tl.int32)
    tmp13 = triton_helpers.maximum(tmp12, tmp11)
    tl.store(out_ptr0 + x7, tmp13, xmask)


@triton.jit
def triton_poi_fused_convolution_reflection_pad2d_relu_6(in_ptr0, in_ptr1,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 82944
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex % 18
    x1 = xindex // 18 % 18
    x4 = xindex // 324
    x2 = xindex // 324 % 64
    x5 = xindex
    tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 + x0)) +
        -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x4), xmask,
        eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(out_ptr0 + x5, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 3072
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 256 % 3
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x3, tmp2, xmask)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 256 % 64
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_9(in_ptr0, in_ptr1,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 64 % 64
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_10(in_ptr0, in_ptr1,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 64 % 128
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, None)


@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_11(in_ptr0, in_ptr1,
        out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    tl.full([XBLOCK], True, tl.int1)
    x3 = xindex
    x1 = xindex // 16 % 128
    tmp0 = tl.load(in_ptr0 + x3, None)
    tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tmp5 = 0.0
    tmp6 = tmp4 <= tmp5
    tl.store(out_ptr0 + x3, tmp6, None)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
     primals_7, primals_8, primals_9, primals_10, primals_11) = args
    args.clear()
    assert_size_stride(primals_1, (4, 256, 4, 4), (4096, 16, 4, 1))
    assert_size_stride(primals_2, (128, 256, 3, 3), (2304, 9, 3, 1))
    assert_size_stride(primals_3, (128,), (1,))
    assert_size_stride(primals_4, (128, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_5, (128,), (1,))
    assert_size_stride(primals_6, (64, 128, 3, 3), (1152, 9, 3, 1))
    assert_size_stride(primals_7, (64,), (1,))
    assert_size_stride(primals_8, (64, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_9, (64,), (1,))
    assert_size_stride(primals_10, (3, 64, 3, 3), (576, 9, 3, 1))
    assert_size_stride(primals_11, (3,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 256, 6, 6), (9216, 36, 6, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_reflection_pad2d_0[grid(36864)](primals_1, buf0,
            36864, XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 128, 4, 4), (2048, 16, 4, 1))
        buf2 = empty_strided_cuda((8,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_1[grid(8)](buf2, 8,
            XBLOCK=8, num_warps=1, num_stages=1)
        buf3 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1), torch.float32)
        triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_2[grid(51200)](
            buf2, buf1, primals_3, buf3, 51200, XBLOCK=512, num_warps=4, num_stages=1)
        buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf4, (4, 128, 8, 8), (8192, 64, 8, 1))
        buf5 = empty_strided_cuda((4, 128, 10, 10), (12800, 100, 10, 1), torch.float32)
        triton_poi_fused_convolution_reflection_pad2d_relu_3[grid(51200)](buf4,
            primals_5, buf5, 51200, XBLOCK=512, num_warps=4, num_stages=1)
        buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf6, (4, 64, 8, 8), (4096, 64, 8, 1))
        buf7 = empty_strided_cuda((16,), (1,), torch.int64)
        triton_poi_fused__to_copy_add_arange_mul_4[grid(16)](buf7, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
        buf8 = empty_strided_cuda((4, 64, 18, 18), (20736, 324, 18, 1), torch.float32)
        triton_poi_fused__unsafe_index_convolution_reflection_pad2d_relu_5[grid(82944)](
            buf7, buf6, primals_7, buf8, 82944, XBLOCK=512, num_warps=8, num_stages=1)
        buf9 = extern_kernels.convolution(buf8, primals_8, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf9, (4, 64, 16, 16), (16384, 256, 16, 1))
        buf10 = empty_strided_cuda((4, 64, 18, 18), (20736, 324, 18, 1), torch.float32)
        triton_poi_fused_convolution_reflection_pad2d_relu_6[grid(82944)](buf9,
            primals_9, buf10, 82944, XBLOCK=512, num_warps=8, num_stages=1)
        buf11 = extern_kernels.convolution(buf10, primals_10, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf11, (4, 3, 16, 16), (768, 256, 16, 1))
        buf12 = buf11
        del buf11
        triton_poi_fused_convolution_7[grid(3072)](buf12, primals_11, 3072,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_11
        buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_8[grid(65536)](
            buf9, primals_9, buf13, 65536, XBLOCK=512, num_warps=4, num_stages=1)
        del buf9
        del primals_9
        buf14 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_9[grid(16384)](
            buf6, primals_7, buf14, 16384, XBLOCK=128, num_warps=4, num_stages=1)
        del buf6
        del primals_7
        buf15 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_10[grid(32768)](
            buf4, primals_5, buf15, 32768, XBLOCK=256, num_warps=4, num_stages=1)
        del buf4
        del primals_5
        buf16 = empty_strided_cuda((4, 128, 4, 4), (2048, 16, 4, 1), torch.bool)
        triton_poi_fused_convolution_relu_threshold_backward_11[grid(8192)](
            buf1, primals_3, buf16, 8192, XBLOCK=256, num_warps=4, num_stages=1)
        del buf1
        del primals_3
    return (buf12, primals_2, primals_4, primals_6, primals_8, primals_10,
        buf0, buf2, buf3, buf5, buf7, buf8, buf10, buf13, buf14, buf15, buf16)


class decoder3New(nn.Module):

    def __init__(self):
        super(decoder3New, self).__init__()
        self.reflecPad7 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv7 = nn.Conv2d(256, 128, 3, 1, 0)
        self.relu7 = nn.ReLU(inplace=True)
        self.unpool = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad8 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv8 = nn.Conv2d(128, 128, 3, 1, 0)
        self.relu8 = nn.ReLU(inplace=True)
        self.reflecPad9 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv9 = nn.Conv2d(128, 64, 3, 1, 0)
        self.relu9 = nn.ReLU(inplace=True)
        self.unpool2 = nn.UpsamplingNearest2d(scale_factor=2)
        self.reflecPad10 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv10 = nn.Conv2d(64, 64, 3, 1, 0)
        self.relu10 = nn.ReLU(inplace=True)
        self.reflecPad11 = nn.ReflectionPad2d((1, 1, 1, 1))
        self.conv11 = nn.Conv2d(64, 3, 3, 1, 0)

    def forward(self, input_0):
        primals_2 = self.conv7.weight
        primals_3 = self.conv7.bias
        primals_4 = self.conv8.weight
        primals_5 = self.conv8.bias
        primals_6 = self.conv9.weight
        primals_7 = self.conv9.bias
        primals_8 = self.conv10.weight
        primals_9 = self.conv10.bias
        primals_10 = self.conv11.weight
        primals_11 = self.conv11.bias
        primals_1 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11])
        return output[0]
cy-xu/LinearStyleTransfer
decoder3
false
6540
[ "BSD-2-Clause" ]
1
a07ab32db037f60a122e252588d6bd504b7d70d7
https://github.com/cy-xu/LinearStyleTransfer/tree/a07ab32db037f60a122e252588d6bd504b7d70d7
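A sketch of the intended parity check for this record, assuming a CUDA device; the weights are copied across so the eager and compiled modules are comparable, and the tolerance is a judgment call:

import torch

x = torch.rand(4, 256, 4, 4, device='cuda')  # matches get_inputs()
ref = decoder3().cuda()
new = decoder3New().cuda()
new.load_state_dict(ref.state_dict())  # identical parameter names, so state transfers directly
with torch.no_grad():
    out_ref = ref(x)
out_new = new(x)
assert out_ref.shape == out_new.shape == (4, 3, 16, 16)
assert torch.allclose(out_ref, out_new, atol=1e-4)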
LinearExcitability
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_2/inductor_cache/2m/c2m7w5cdq5nalq4nyaoqfojwxnh34fsgwmz4enw3ifoatg6tbyn6.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add, aten.view]
# Source node to ATen node mapping:
#   output_1 => add, view_3
# Graph fragment:
#   %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_2), kwargs = {})
#   %view_3 : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%view_2, [4, 4, 4, 4]), kwargs = {})
triton_poi_fused_add_view_0 = async_compile.triton('triton_poi_fused_add_view_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[256],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=90, major=9, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=132), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_view_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'B098E03CDA7B8ADC90DAFFDF24A2956451D1B13F297756A5DCC209498AA53705', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
    tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + (x4), tmp2, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4, ), (1, ))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [output], Original ATen: [aten.mm]
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0  # reuse
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [output_1], Original ATen: [aten.add, aten.view]
        stream0 = get_raw_stream(0)
        triton_poi_fused_add_view_0.run(buf2, primals_2, 256, grid=grid(256), stream=stream0)
        del primals_2
    return (buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
    primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
    primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([primals_1, primals_2, primals_3])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
from torch import nn
from torch.nn.parameter import Parameter

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_add_view_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x4 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_out_ptr0 + x4, xmask)
    tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tl.store(in_out_ptr0 + x4, tmp2, xmask)


def call(args):
    primals_1, primals_2, primals_3 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4), (4, 1))
    assert_size_stride(primals_2, (4,), (1,))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
        extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
            reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
        del primals_1
        buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
        del buf0
        buf2 = buf1
        del buf1
        get_raw_stream(0)
        triton_poi_fused_add_view_0[grid(256)](buf2, primals_2, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
    return buf2, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0)


def linearExcitability(input, weight, excitability=None, bias=None):
    """
    Applies a linear transformation to the incoming data: :math:`y = c(xA^T) + b`.

    Shape:
        - input:        :math:`(N, *, in\\_features)`
        - weight:       :math:`(out\\_features, in\\_features)`
        - excitability: :math:`(out\\_features)`
        - bias:         :math:`(out\\_features)`
        - output:       :math:`(N, *, out\\_features)`
    (NOTE: `*` means any number of additional dimensions)
    """
    if excitability is not None:
        output = input.matmul(weight.t()) * excitability
    else:
        output = input.matmul(weight.t())
    if bias is not None:
        output += bias
    return output


class LinearExcitabilityNew(nn.Module):
    """Applies a linear transformation to the incoming data: :math:`y = c(Ax) + b`

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: if 'False', layer will not learn an additive bias-parameter (DEFAULT=True)
        excitability: if 'False', layer will not learn a multiplicative excitability-parameter (DEFAULT=True)

    Shape:
        - input: :math:`(N, *, in\\_features)` where `*` means any number of additional dimensions
        - output: :math:`(N, *, out\\_features)` where all but the last dimension are the same shape as the input.

    Attributes:
        weight: the learnable weights of the module of shape (out_features x in_features)
        excitability: the learnable multiplication terms (out_features)
        bias: the learnable bias of the module of shape (out_features)
        excit_buffer: fixed multiplication variable (out_features)

    Examples::

        >>> m = LinearExcitability(20, 30)
        >>> input = autograd.Variable(torch.randn(128, 20))
        >>> output = m(input)
        >>> print(output.size())
    """

    def __init__(self, in_features, out_features, bias=True,
                 excitability=False, excit_buffer=False):
        super(LinearExcitabilityNew, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if excitability:
            self.excitability = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('excitability', None)
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        if excit_buffer:
            buffer = torch.Tensor(out_features).uniform_(1, 1)
            self.register_buffer('excit_buffer', buffer)
        else:
            self.register_buffer('excit_buffer', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Modifies the parameters "in-place" to reset them at appropriate initialization values"""
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.excitability is not None:
            self.excitability.data.uniform_(1, 1)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def __repr__(self):
        return self.__class__.__name__ + '(' + 'in_features=' + str(self.in_features) + ', out_features=' + str(self.out_features) + ')'

    def forward(self, input_0):
        primals_1 = self.weight
        primals_2 = self.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3])
        return output[0]
GMvandeVen/progressive-learning-pytorch
LinearExcitability
false
17285
[ "MIT" ]
4
165645b2d7595d94d036f765c9a311d505e667a3
https://github.com/GMvandeVen/progressive-learning-pytorch/tree/165645b2d7595d94d036f765c9a311d505e667a3
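The eager function above reduces to an ordinary affine map when excitability is left at None; a small self-contained check of that identity:

import torch

x = torch.randn(2, 3, 20)
w = torch.randn(30, 20)
b = torch.randn(30)
y = linearExcitability(x, w, bias=b)  # y = x @ w.T + b when excitability is None
assert y.shape == (2, 3, 30)
assert torch.allclose(y, x.matmul(w.t()) + b)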
Perplexity
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()

# kernel path: runs/run_shard_0/inductor_cache/65/c65frvogjvzvcjnoj7n72ziopkhhgusygsvovz7h4ukukiilkzeo.py
# Topologically Sorted Source Nodes: [log_probs], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
#   log_probs => amax, sub
# Graph fragment:
#   %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
#   %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[64],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = (xindex // 4)
    tmp0 = tl.load(in_ptr0 + (x2), xmask)
    tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')


# kernel path: runs/run_shard_0/inductor_cache/nb/cnbpd2lnpuggtoo64pjekvrld53vklqd25jk2g6wbao26doipwvl.py
# Topologically Sorted Source Nodes: [out, mean, ppl], Original ATen: [aten.neg, aten.mean, aten.exp]
# Source node to ATen node mapping:
#   mean => mean
#   out => neg
#   ppl => exp_1
# Graph fragment:
#   %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%squeeze,), kwargs = {})
#   %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%neg, [1]), kwargs = {})
#   %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mean,), kwargs = {})
triton_poi_fused_exp_mean_neg_1 = async_compile.triton('triton_poi_fused_exp_mean_neg_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties

@triton_heuristics.pointwise(
    size_hints=[4],
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_mean_neg_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_mean_neg_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + (16*x0), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr1 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr1 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr1 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr1 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr1 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp53 = tl.load(in_ptr1 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp56 = tl.load(in_ptr1 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp63 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
    tmp69 = tl.load(in_ptr1 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp71 = tl.load(in_ptr1 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp74 = tl.load(in_ptr1 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp77 = tl.load(in_ptr1 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
    tmp6 = tl.load(in_ptr1 + (tmp4 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp13 = tl_math.exp(tmp12)
    tmp14 = tmp11 + tmp13
    tmp16 = tl_math.exp(tmp15)
    tmp17 = tmp14 + tmp16
    tmp18 = tl_math.log(tmp17)
    tmp19 = tmp6 - tmp18
    tmp20 = -tmp19
    tmp22 = tmp21 + tmp1
    tmp23 = tmp21 < 0
    tmp24 = tl.where(tmp23, tmp22, tmp21)
    tl.device_assert(((0 <= tmp24) & (tmp24 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp24 < 4")
    tmp26 = tl.load(in_ptr1 + (4 + tmp24 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp28 = tl_math.exp(tmp27)
    tmp30 = tl_math.exp(tmp29)
    tmp31 = tmp28 + tmp30
    tmp33 = tl_math.exp(tmp32)
    tmp34 = tmp31 + tmp33
    tmp36 = tl_math.exp(tmp35)
    tmp37 = tmp34 + tmp36
    tmp38 = tl_math.log(tmp37)
    tmp39 = tmp26 - tmp38
    tmp40 = -tmp39
    tmp41 = tmp20 + tmp40
    tmp43 = tmp42 + tmp1
    tmp44 = tmp42 < 0
    tmp45 = tl.where(tmp44, tmp43, tmp42)
    tl.device_assert(((0 <= tmp45) & (tmp45 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp45 < 4")
    tmp47 = tl.load(in_ptr1 + (8 + tmp45 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp49 = tl_math.exp(tmp48)
    tmp51 = tl_math.exp(tmp50)
    tmp52 = tmp49 + tmp51
    tmp54 = tl_math.exp(tmp53)
    tmp55 = tmp52 + tmp54
    tmp57 = tl_math.exp(tmp56)
    tmp58 = tmp55 + tmp57
    tmp59 = tl_math.log(tmp58)
    tmp60 = tmp47 - tmp59
    tmp61 = -tmp60
    tmp62 = tmp41 + tmp61
    tmp64 = tmp63 + tmp1
    tmp65 = tmp63 < 0
    tmp66 = tl.where(tmp65, tmp64, tmp63)
    tl.device_assert(((0 <= tmp66) & (tmp66 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp66 < 4")
    tmp68 = tl.load(in_ptr1 + (12 + tmp66 + (16*x0)), xmask, eviction_policy='evict_last')
    tmp70 = tl_math.exp(tmp69)
    tmp72 = tl_math.exp(tmp71)
    tmp73 = tmp70 + tmp72
    tmp75 = tl_math.exp(tmp74)
    tmp76 = tmp73 + tmp75
    tmp78 = tl_math.exp(tmp77)
    tmp79 = tmp76 + tmp78
    tmp80 = tl_math.log(tmp79)
    tmp81 = tmp68 - tmp80
    tmp82 = -tmp81
    tmp83 = tmp62 + tmp82
    tmp84 = 4.0
    tmp85 = tmp83 / tmp84
    tmp86 = tl_math.exp(tmp85)
    tl.store(in_out_ptr0 + (x0), tmp86, xmask)
''', device_str='cuda')


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        # Topologically Sorted Source Nodes: [log_probs], Original ATen: [aten._log_softmax]
        stream0 = get_raw_stream(0)
        triton_poi_fused__log_softmax_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
        del arg0_1
        buf1 = empty_strided_cuda((4, ), (1, ), torch.float32)
        buf2 = buf1; del buf1  # reuse
        # Topologically Sorted Source Nodes: [out, mean, ppl], Original ATen: [aten.neg, aten.mean, aten.exp]
        triton_poi_fused_exp_mean_neg_1.run(buf2, arg1_1, buf0, 4, grid=grid(4), stream=stream0)
        del arg1_1
        del buf0
    return (buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
    arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
    fn = lambda: call([arg0_1, arg1_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 64
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x1 = xindex // 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
    tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp3 = triton_helpers.maximum(tmp1, tmp2)
    tmp5 = triton_helpers.maximum(tmp3, tmp4)
    tmp7 = triton_helpers.maximum(tmp5, tmp6)
    tmp8 = tmp0 - tmp7
    tl.store(out_ptr0 + x2, tmp8, xmask)


@triton.jit
def triton_poi_fused_exp_mean_neg_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 4
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp7 = tl.load(in_ptr1 + 16 * x0, xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr1 + (1 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (2 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (3 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp27 = tl.load(in_ptr1 + (4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp29 = tl.load(in_ptr1 + (5 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp32 = tl.load(in_ptr1 + (6 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp35 = tl.load(in_ptr1 + (7 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp42 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp48 = tl.load(in_ptr1 + (8 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp50 = tl.load(in_ptr1 + (9 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp53 = tl.load(in_ptr1 + (10 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp56 = tl.load(in_ptr1 + (11 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp63 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp69 = tl.load(in_ptr1 + (12 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp71 = tl.load(in_ptr1 + (13 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp74 = tl.load(in_ptr1 + (14 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp77 = tl.load(in_ptr1 + (15 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp1 = tl.full([XBLOCK], 4, tl.int32)
    tmp2 = tmp0 + tmp1
    tmp3 = tmp0 < 0
    tmp4 = tl.where(tmp3, tmp2, tmp0)
    tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
        'index out of bounds: 0 <= tmp4 < 4')
    tmp6 = tl.load(in_ptr1 + (tmp4 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp8 = tl_math.exp(tmp7)
    tmp10 = tl_math.exp(tmp9)
    tmp11 = tmp8 + tmp10
    tmp13 = tl_math.exp(tmp12)
    tmp14 = tmp11 + tmp13
    tmp16 = tl_math.exp(tmp15)
    tmp17 = tmp14 + tmp16
    tmp18 = tl_math.log(tmp17)
    tmp19 = tmp6 - tmp18
    tmp20 = -tmp19
    tmp22 = tmp21 + tmp1
    tmp23 = tmp21 < 0
    tmp24 = tl.where(tmp23, tmp22, tmp21)
    tl.device_assert((0 <= tmp24) & (tmp24 < 4) | ~xmask,
        'index out of bounds: 0 <= tmp24 < 4')
    tmp26 = tl.load(in_ptr1 + (4 + tmp24 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp28 = tl_math.exp(tmp27)
    tmp30 = tl_math.exp(tmp29)
    tmp31 = tmp28 + tmp30
    tmp33 = tl_math.exp(tmp32)
    tmp34 = tmp31 + tmp33
    tmp36 = tl_math.exp(tmp35)
    tmp37 = tmp34 + tmp36
    tmp38 = tl_math.log(tmp37)
    tmp39 = tmp26 - tmp38
    tmp40 = -tmp39
    tmp41 = tmp20 + tmp40
    tmp43 = tmp42 + tmp1
    tmp44 = tmp42 < 0
    tmp45 = tl.where(tmp44, tmp43, tmp42)
    tl.device_assert((0 <= tmp45) & (tmp45 < 4) | ~xmask,
        'index out of bounds: 0 <= tmp45 < 4')
    tmp47 = tl.load(in_ptr1 + (8 + tmp45 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp49 = tl_math.exp(tmp48)
    tmp51 = tl_math.exp(tmp50)
    tmp52 = tmp49 + tmp51
    tmp54 = tl_math.exp(tmp53)
    tmp55 = tmp52 + tmp54
    tmp57 = tl_math.exp(tmp56)
    tmp58 = tmp55 + tmp57
    tmp59 = tl_math.log(tmp58)
    tmp60 = tmp47 - tmp59
    tmp61 = -tmp60
    tmp62 = tmp41 + tmp61
    tmp64 = tmp63 + tmp1
    tmp65 = tmp63 < 0
    tmp66 = tl.where(tmp65, tmp64, tmp63)
    tl.device_assert((0 <= tmp66) & (tmp66 < 4) | ~xmask,
        'index out of bounds: 0 <= tmp66 < 4')
    tmp68 = tl.load(in_ptr1 + (12 + tmp66 + 16 * x0), xmask, eviction_policy='evict_last')
    tmp70 = tl_math.exp(tmp69)
    tmp72 = tl_math.exp(tmp71)
    tmp73 = tmp70 + tmp72
    tmp75 = tl_math.exp(tmp74)
    tmp76 = tmp73 + tmp75
    tmp78 = tl_math.exp(tmp77)
    tmp79 = tmp76 + tmp78
    tmp80 = tl_math.log(tmp79)
    tmp81 = tmp68 - tmp80
    tmp82 = -tmp81
    tmp83 = tmp62 + tmp82
    tmp84 = 4.0
    tmp85 = tmp83 / tmp84
    tmp86 = tl_math.exp(tmp85)
    tl.store(in_out_ptr0 + x0, tmp86, xmask)


def call(args):
    arg0_1, arg1_1 = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
    assert_size_stride(arg1_1, (4, 4), (4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused__log_softmax_0[grid(64)](arg0_1, buf0, 64,
            XBLOCK=64, num_warps=1, num_stages=1)
        del arg0_1
        buf1 = empty_strided_cuda((4,), (1,), torch.float32)
        buf2 = buf1
        del buf1
        triton_poi_fused_exp_mean_neg_1[grid(4)](buf2, arg1_1, buf0, 4,
            XBLOCK=4, num_warps=1, num_stages=1)
        del arg1_1
        del buf0
    return buf2,


class PerplexityNew(nn.Module):

    def __init__(self):
        super(PerplexityNew, self).__init__()

    def forward(self, input_0, input_1):
        arg0_1 = input_0
        arg1_1 = input_1
        output = call([arg0_1, arg1_1])
        return output[0]
kefirski/contiguous-succotash
Perplexity
false
15804
[ "MIT" ]
57
7497efd1392693248ed98805dcdbbf5dc125afc2
https://github.com/kefirski/contiguous-succotash/tree/7497efd1392693248ed98805dcdbbf5dc125afc2
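An eager reference for what the two fused kernels above compute, assuming the original Perplexity.forward took (logits, targets); the neg/mean/exp graph fragment encodes exp of the mean token negative log-likelihood per batch row:

import torch
import torch.nn.functional as F

logits = torch.rand(4, 4, 4)           # (batch, seq, vocab), as guarded in call()
targets = torch.randint(0, 4, (4, 4))  # (batch, seq), int64
log_probs = F.log_softmax(logits, dim=-1)
nll = -log_probs.gather(-1, targets.unsqueeze(-1)).squeeze(-1)
ppl = nll.mean(dim=1).exp()            # one perplexity value per batch row
assert ppl.shape == (4,)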
AdjustLog
from torch.nn import Module
import torch
from torch import Tensor
from typing import Optional


def KORNIA_CHECK_IS_TENSOR(x, msg: 'Optional[str]'=None):
    if not isinstance(x, Tensor):
        raise TypeError(f'Not a Tensor type. Got: {type(x)}.\n{msg}')


def adjust_log(image: 'Tensor', gain: 'float'=1, inv: 'bool'=False,
               clip_output: 'bool'=True) ->Tensor:
    """Adjust log correction on the input image tensor.

    The input image is expected to be in the range of [0, 1].

    Reference:
        [1]: http://www.ece.ucsb.edu/Faculty/Manjunath/courses/ece178W03/EnhancePart1.pdf

    Args:
        image: Image to be adjusted in the shape of :math:`(*, H, W)`.
        gain: The multiplier of logarithmic function.
        inv: If is set to True the function will return the inverse logarithmic correction.
        clip_output: Whether to clip the output image with range of [0, 1].

    Returns:
        Adjusted tensor in the shape of :math:`(*, H, W)`.

    Example:
        >>> x = torch.zeros(1, 1, 2, 2)
        >>> adjust_log(x, inv=True)
        tensor([[[[0., 0.],
                  [0., 0.]]]])
    """
    KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)')
    if inv:
        img_adjust = (2 ** image - 1) * gain
    else:
        img_adjust = (1 + image).log2() * gain
    if clip_output:
        img_adjust = img_adjust.clamp(min=0.0, max=1.0)
    return img_adjust


class AdjustLog(Module):
    """Adjust log correction on the input image tensor.

    The input image is expected to be in the range of [0, 1].

    Reference:
        [1]: http://www.ece.ucsb.edu/Faculty/Manjunath/courses/ece178W03/EnhancePart1.pdf

    Args:
        image: Image to be adjusted in the shape of :math:`(*, H, W)`.
        gain: The multiplier of logarithmic function.
        inv: If is set to True the function will return the inverse logarithmic correction.
        clip_output: Whether to clip the output image with range of [0, 1].

    Example:
        >>> x = torch.zeros(1, 1, 2, 2)
        >>> AdjustLog(inv=True)(x)
        tensor([[[[0., 0.],
                  [0., 0.]]]])
    """

    def __init__(self, gain: 'float'=1, inv: 'bool'=False,
                 clip_output: 'bool'=True) ->None:
        super().__init__()
        self.gain: 'float' = gain
        self.inv: 'bool' = inv
        self.clip_output: 'bool' = clip_output

    def forward(self, image: 'Tensor') ->Tensor:
        return adjust_log(image, gain=self.gain, inv=self.inv,
            clip_output=self.clip_output)


def get_inputs():
    return [torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {}]
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.nn import Module
from torch import Tensor
from typing import Optional

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_add_clamp_log2_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0
    tmp2 = tmp0 + tmp1
    tmp3 = libdevice.log2(tmp2)
    tmp4 = tmp3 * tmp1
    tmp5 = 0.0
    tmp6 = triton_helpers.maximum(tmp4, tmp5)
    tmp7 = triton_helpers.minimum(tmp6, tmp1)
    tl.store(out_ptr0 + x0, tmp7, xmask)


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_add_clamp_log2_mul_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del arg0_1
    return buf0,


def KORNIA_CHECK_IS_TENSOR(x, msg: 'Optional[str]'=None):
    if not isinstance(x, Tensor):
        raise TypeError(f'Not a Tensor type. Got: {type(x)}.\n{msg}')


def adjust_log(image: 'Tensor', gain: 'float'=1, inv: 'bool'=False,
               clip_output: 'bool'=True) ->Tensor:
    """Adjust log correction on the input image tensor.

    The input image is expected to be in the range of [0, 1].

    Reference:
        [1]: http://www.ece.ucsb.edu/Faculty/Manjunath/courses/ece178W03/EnhancePart1.pdf

    Args:
        image: Image to be adjusted in the shape of :math:`(*, H, W)`.
        gain: The multiplier of logarithmic function.
        inv: If is set to True the function will return the inverse logarithmic correction.
        clip_output: Whether to clip the output image with range of [0, 1].

    Returns:
        Adjusted tensor in the shape of :math:`(*, H, W)`.

    Example:
        >>> x = torch.zeros(1, 1, 2, 2)
        >>> adjust_log(x, inv=True)
        tensor([[[[0., 0.],
                  [0., 0.]]]])
    """
    KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)')
    if inv:
        img_adjust = (2 ** image - 1) * gain
    else:
        img_adjust = (1 + image).log2() * gain
    if clip_output:
        img_adjust = img_adjust.clamp(min=0.0, max=1.0)
    return img_adjust


class AdjustLogNew(Module):
    """Adjust log correction on the input image tensor.

    The input image is expected to be in the range of [0, 1].

    Reference:
        [1]: http://www.ece.ucsb.edu/Faculty/Manjunath/courses/ece178W03/EnhancePart1.pdf

    Args:
        image: Image to be adjusted in the shape of :math:`(*, H, W)`.
        gain: The multiplier of logarithmic function.
        inv: If is set to True the function will return the inverse logarithmic correction.
        clip_output: Whether to clip the output image with range of [0, 1].

    Example:
        >>> x = torch.zeros(1, 1, 2, 2)
        >>> AdjustLog(inv=True)(x)
        tensor([[[[0., 0.],
                  [0., 0.]]]])
    """

    def __init__(self, gain: 'float'=1, inv: 'bool'=False,
                 clip_output: 'bool'=True) ->None:
        super().__init__()
        self.gain: 'float' = gain
        self.inv: 'bool' = inv
        self.clip_output: 'bool' = clip_output

    def forward(self, input_0):
        arg0_1 = input_0
        output = call([arg0_1])
        return output[0]
YanivHollander/kornia
AdjustLog
false
14632
[ "ECL-2.0", "Apache-2.0" ]
418
ccd258d0956da89b1feca96448eff8e4969d405a
https://github.com/YanivHollander/kornia/tree/ccd258d0956da89b1feca96448eff8e4969d405a
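The docstring examples exercised directly, plus the non-trivial branch: for inputs in [0, 1] the forward correction is log2(1 + x) * gain, clipped back to [0, 1]:

import torch

x = torch.zeros(1, 1, 2, 2)
assert torch.equal(AdjustLog(inv=True)(x), x)     # (2**0 - 1) * 1 == 0
y = adjust_log(torch.ones(1, 1, 2, 2))
assert torch.allclose(y, torch.ones(1, 1, 2, 2))  # log2(1 + 1) == 1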
PartialConv
import math
import torch
import torch.nn as nn


def weights_init(init_type='gaussian'):

    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0
                ) and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                nn.init.normal_(m.weight, 0.0, 0.02)
            elif init_type == 'xavier':
                nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, 'Unsupported initialization: {}'.format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias, 0.0)
    return init_fun


class PartialConv(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        super().__init__()
        self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding, dilation, groups, bias)
        self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding, dilation, groups, False)
        self.input_conv.apply(weights_init('kaiming'))
        self.slide_winsize = in_channels * kernel_size * kernel_size
        torch.nn.init.constant_(self.mask_conv.weight, 1.0)
        for param in self.mask_conv.parameters():
            param.requires_grad = False

    def forward(self, input, mask):
        output = self.input_conv(input * mask)
        if self.input_conv.bias is not None:
            output_bias = self.input_conv.bias.view(1, -1, 1, 1).expand_as(output)
        else:
            output_bias = torch.zeros_like(output)
        with torch.no_grad():
            output_mask = self.mask_conv(mask)
        no_update_holes = output_mask == 0
        mask_sum = output_mask.masked_fill_(no_update_holes, 1.0)
        output_pre = (output - output_bias) * self.slide_winsize / mask_sum + output_bias
        output = output_pre.masked_fill_(no_update_holes, 0.0)
        new_mask = torch.ones_like(output)
        new_mask = new_mask.masked_fill_(no_update_holes, 0.0)
        return output, new_mask


def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn

assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda


@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = tl.load(in_ptr1 + x0, xmask)
    tmp2 = tmp0 * tmp1
    tl.store(out_ptr0 + x0, tmp2, xmask)


@triton.jit
def triton_poi_fused_add_convolution_div_eq_masked_fill_mul_ones_like_sub_1(
        in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + x2, xmask)
    tmp3 = tl.load(in_out_ptr0 + x2, xmask)
    tmp4 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp1 = 0.0
    tmp2 = tmp0 == tmp1
    tmp5 = tmp3 + tmp4
    tmp6 = tmp5 - tmp4
    tmp7 = 64.0
    tmp8 = tmp6 * tmp7
    tmp9 = 1.0
    tmp10 = tl.where(tmp2, tmp9, tmp0)
    tmp11 = tmp8 / tmp10
    tmp12 = tmp11 + tmp4
    tmp13 = tl.where(tmp2, tmp1, tmp12)
    tmp14 = tl.where(tmp2, tmp1, tmp9)
    tl.store(in_out_ptr0 + x2, tmp13, xmask)
    tl.store(out_ptr0 + x2, tmp14, xmask)


def call(args):
    primals_1, primals_2, primals_3, primals_4, primals_5 = args
    args.clear()
    assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
    assert_size_stride(primals_4, (4,), (1,))
    assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        get_raw_stream(0)
        triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
        del primals_1
        buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
        buf2 = extern_kernels.convolution(primals_2, primals_5, stride=(1, 1),
            padding=(0, 0), dilation=(1, 1), transposed=False,
            output_padding=(0, 0), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
        del primals_2
        del primals_5
        buf3 = buf1
        del buf1
        buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
        triton_poi_fused_add_convolution_div_eq_masked_fill_mul_ones_like_sub_1[
            grid(16)](buf3, buf2, primals_4, buf4, 16, XBLOCK=16,
            num_warps=1, num_stages=1)
        del primals_4
    return buf3, buf4, primals_3, buf0, buf2


def weights_init(init_type='gaussian'):

    def init_fun(m):
        classname = m.__class__.__name__
        if (classname.find('Conv') == 0 or classname.find('Linear') == 0
                ) and hasattr(m, 'weight'):
            if init_type == 'gaussian':
                nn.init.normal_(m.weight, 0.0, 0.02)
            elif init_type == 'xavier':
                nn.init.xavier_normal_(m.weight, gain=math.sqrt(2))
            elif init_type == 'kaiming':
                nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                nn.init.orthogonal_(m.weight, gain=math.sqrt(2))
            elif init_type == 'default':
                pass
            else:
                assert 0, 'Unsupported initialization: {}'.format(init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                nn.init.constant_(m.bias, 0.0)
    return init_fun


class PartialConvNew(nn.Module):

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        super().__init__()
        self.input_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding, dilation, groups, bias)
        self.mask_conv = nn.Conv2d(in_channels, out_channels, kernel_size,
            stride, padding, dilation, groups, False)
        self.input_conv.apply(weights_init('kaiming'))
        self.slide_winsize = in_channels * kernel_size * kernel_size
        torch.nn.init.constant_(self.mask_conv.weight, 1.0)
        for param in self.mask_conv.parameters():
            param.requires_grad = False

    def forward(self, input_0, input_1):
        primals_1 = self.input_conv.weight
        primals_4 = self.input_conv.bias
        primals_2 = self.mask_conv.weight
        primals_3 = input_0
        primals_5 = input_1
        output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
        return output[0], output[1]
Northshoot/3d-photo-inpainting
PartialConv
false
11772
[ "MIT" ]
0
49dd36ce4a277929831f09d978721b3fdb87eb25
https://github.com/Northshoot/3d-photo-inpainting/tree/49dd36ce4a277929831f09d978721b3fdb87eb25
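A shape check for the eager module, following get_inputs()/get_init_inputs(): a 4x4 kernel over a 4x4 image leaves one output position per channel, and an all-ones mask has no holes to fill:

import torch

pc = PartialConv(in_channels=4, out_channels=4, kernel_size=4)
image = torch.rand(4, 4, 4, 4)
mask = torch.ones(4, 4, 4, 4)
out, new_mask = pc(image, mask)
assert out.shape == new_mask.shape == (4, 4, 1, 1)
assert torch.all(new_mask == 1.0)  # a fully valid mask stays valid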
Pointnet
import torch
import torch.utils.data
import torch.nn as nn


class Pointnet(nn.Module):

    def __init__(self, in_channels, out_channels, hidden_dim, segmentation=False):
        super().__init__()
        self.fc_in = nn.Conv1d(in_channels, 2 * hidden_dim, 1)
        self.fc_0 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_1 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_2 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_3 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.segmentation = segmentation
        if segmentation:
            self.fc_out = nn.Conv1d(2 * hidden_dim, out_channels, 1)
        else:
            self.fc_out = nn.Linear(hidden_dim, out_channels)
        self.activation = nn.ReLU()

    def forward(self, x):
        x = self.fc_in(x)
        x = self.fc_0(self.activation(x))
        x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x)
        x = torch.cat([x, x_pool], dim=1)
        x = self.fc_1(self.activation(x))
        x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x)
        x = torch.cat([x, x_pool], dim=1)
        x = self.fc_2(self.activation(x))
        x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x)
        x = torch.cat([x, x_pool], dim=1)
        x = self.fc_3(self.activation(x))
        if self.segmentation:
            x_pool = torch.max(x, dim=2, keepdim=True)[0].expand_as(x)
            x = torch.cat([x, x_pool], dim=1)
        else:
            x = torch.max(x, dim=2)[0]
        x = self.fc_out(x)
        return x


def get_inputs():
    return [torch.rand([4, 4, 4])]


def get_init_inputs():
    return [[], {'in_channels': 4, 'out_channels': 4, 'hidden_dim': 4}]
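A quick shape check for the classification path (segmentation=False): the global max over the point axis collapses (B, hidden, N) to (B, hidden) before the final Linear:

import torch

model = Pointnet(in_channels=4, out_channels=4, hidden_dim=4)
points = torch.rand(4, 4, 4)  # (batch, channels, num_points)
out = model(points)
assert out.shape == (4, 4)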
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor


@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
        XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x3 = xindex
    x1 = xindex // 4 % 8
    tmp0 = tl.load(in_out_ptr0 + x3, xmask)
    tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp3 = tl.full([1], 0, tl.int32)
    tmp4 = triton_helpers.maximum(tmp3, tmp2)
    tl.store(in_out_ptr0 + x3, tmp4, xmask)


@triton.jit
def triton_poi_fused_convolution_max_1(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp20 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp36 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp3 + tmp1
    tmp5 = tmp2 > tmp4
    tmp6 = tmp2 == tmp4
    tmp7 = tmp2 != tmp2
    tmp8 = tmp4 != tmp4
    tmp9 = tmp7 > tmp8
    tmp10 = tmp5 | tmp9
    tmp11 = tmp7 & tmp8
    tmp12 = tmp6 | tmp11
    tmp13 = tl.full([1], 0, tl.int64)
    tmp14 = tl.full([1], 1, tl.int64)
    tmp15 = tmp13 < tmp14
    tmp16 = tmp12 & tmp15
    tmp17 = tmp10 | tmp16
    tmp18 = tl.where(tmp17, tmp2, tmp4)
    tmp19 = tl.where(tmp17, tmp13, tmp14)
    tmp21 = tmp20 + tmp1
    tmp22 = tmp18 > tmp21
    tmp23 = tmp18 == tmp21
    tmp24 = tmp18 != tmp18
    tmp25 = tmp21 != tmp21
    tmp26 = tmp24 > tmp25
    tmp27 = tmp22 | tmp26
    tmp28 = tmp24 & tmp25
    tmp29 = tmp23 | tmp28
    tmp30 = tl.full([1], 2, tl.int64)
    tmp31 = tmp19 < tmp30
    tmp32 = tmp29 & tmp31
    tmp33 = tmp27 | tmp32
    tmp34 = tl.where(tmp33, tmp18, tmp21)
    tmp35 = tl.where(tmp33, tmp19, tmp30)
    tmp37 = tmp36 + tmp1
    tmp38 = tmp34 > tmp37
    tmp39 = tmp34 == tmp37
    tmp40 = tmp34 != tmp34
    tmp41 = tmp37 != tmp37
    tmp42 = tmp40 > tmp41
    tmp43 = tmp38 | tmp42
    tmp44 = tmp40 & tmp41
    tmp45 = tmp39 | tmp44
    tmp46 = tl.full([1], 3, tl.int64)
    tmp47 = tmp35 < tmp46
    tmp48 = tmp45 & tmp47
    tmp49 = tmp43 | tmp48
    tl.where(tmp49, tmp34, tmp37)
    tmp51 = tl.where(tmp49, tmp35, tmp46)
    tmp52 = triton_helpers.maximum(tmp2, tmp4)
    tmp53 = triton_helpers.maximum(tmp52, tmp21)
    tmp54 = triton_helpers.maximum(tmp53, tmp37)
    tl.store(out_ptr0 + x2, tmp51, xmask)
    tl.store(out_ptr1 + x2, tmp54, xmask)


@triton.jit
def triton_poi_fused_cat_relu_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
        xnumel, XBLOCK: tl.constexpr):
    xnumel = 128
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x1 = xindex // 4 % 8
    x0 = xindex % 4
    x2 = xindex // 32
    x3 = xindex
    tmp0 = x1
    tl.full([1], 0, tl.int64)
    tmp3 = tl.full([1], 4, tl.int64)
    tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), tmp4 & xmask, other=0.0)
    tmp6 = tl.load(in_ptr1 + x1, tmp4 & xmask, eviction_policy='evict_last',
        other=0.0)
    tmp7 = tmp5 + tmp6
    tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
    tmp9 = tl.where(tmp4, tmp7, tmp8)
    tmp10 = tmp0 >= tmp3
    tl.full([1], 8, tl.int64)
    tmp13 = tl.load(in_ptr2 + (4 * x2 + (-4 + x1)), tmp10 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp14 = tl.where(tmp4, tmp9, tmp13)
    tmp15 = tl.full([1], 0, tl.int32)
    tmp16 = triton_helpers.maximum(tmp15, tmp14)
    tl.store(out_ptr0 + x3, tmp16, xmask)


@triton.jit
def triton_poi_fused_convolution_max_3(in_ptr0, in_ptr1, out_ptr0,
        out_ptr1, xnumel, XBLOCK: tl.constexpr):
    xnumel = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x2 = xindex
    x0 = xindex % 4
    tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
    tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp2 = tmp0 + tmp1
    tmp4 = tmp3 + tmp1
    tmp5 = triton_helpers.maximum(tmp2, tmp4)
    tmp7 = tmp6 + tmp1
    tmp8 = triton_helpers.maximum(tmp5, tmp7)
    tmp10 = tmp9 + tmp1
    tmp11 = triton_helpers.maximum(tmp8, tmp10)
    tmp12 = tmp2 > tmp4
    tmp13 = tmp2 == tmp4
    tmp14 = tmp2 != tmp2
    tmp15 = tmp4 != tmp4
    tmp16 = tmp14 > tmp15
    tmp17 = tmp12 | tmp16
    tmp18 = tmp14 & tmp15
    tmp19 = tmp13 | tmp18
    tmp20 = tl.full([1], 0, tl.int64)
    tmp21 = tl.full([1], 1, tl.int64)
    tmp22 = tmp20 < tmp21
    tmp23 = tmp19 & tmp22
    tmp24 = tmp17 | tmp23
    tmp25 = tl.where(tmp24, tmp2, tmp4)
    tmp26 = tl.where(tmp24, tmp20, tmp21)
    tmp27 = tmp25 > tmp7
    tmp28 = tmp25 == tmp7
    tmp29 = tmp25 != tmp25
    tmp30 = tmp7 != tmp7
    tmp31 = tmp29 > tmp30
    tmp32 = tmp27 | tmp31
    tmp33 = tmp29 & tmp30
    tmp34 = tmp28 | tmp33
    tmp35 = tl.full([1], 2, tl.int64)
    tmp36 = tmp26 < tmp35
    tmp37 = tmp34 & tmp36
    tmp38 = tmp32 | tmp37
    tmp39 = tl.where(tmp38, tmp25, tmp7)
    tmp40 = tl.where(tmp38, tmp26, tmp35)
    tmp41 = tmp39 > tmp10
    tmp42 = tmp39 == tmp10
    tmp43 = tmp39 != tmp39
    tmp44 = tmp10 != tmp10
    tmp45 = tmp43 > tmp44
    tmp46 = tmp41 | tmp45
    tmp47 = tmp43 & tmp44
    tmp48 = tmp42 | tmp47
    tmp49 = tl.full([1], 3, tl.int64)
    tmp50 = tmp40 < tmp49
    tmp51 = tmp48 & tmp50
    tmp52 = tmp46 | tmp51
    tl.where(tmp52, tmp39, tmp10)
    tmp54 = tl.where(tmp52, tmp40, tmp49)
    tl.store(out_ptr0 + x2, tmp11, xmask)
    tl.store(out_ptr1 + x2, tmp54, xmask)


def call(args):
    (primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
        primals_7, primals_8, primals_9, primals_10, primals_11,
        primals_12, primals_13) = args
    args.clear()
    assert_size_stride(primals_1, (8, 4, 1), (4, 1, 1))
    assert_size_stride(primals_2, (8,), (1,))
    assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
    assert_size_stride(primals_4, (4, 8, 1), (8, 1, 1))
    assert_size_stride(primals_5, (4,), (1,))
    assert_size_stride(primals_6, (4, 8, 1), (8, 1, 1))
    assert_size_stride(primals_7, (4,), (1,))
    assert_size_stride(primals_8, (4, 8, 1), (8, 1, 1))
    assert_size_stride(primals_9, (4,), (1,))
    assert_size_stride(primals_10, (4, 8, 1), (8, 1, 1))
    assert_size_stride(primals_11, (4,), (1,))
    assert_size_stride(primals_12, (4, 4), (4, 1))
    assert_size_stride(primals_13, (4,), (1,))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)
        buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf0, (4, 8, 4), (32, 4, 1))
        buf1 = buf0
        del buf0
        get_raw_stream(0)
        triton_poi_fused_convolution_relu_0[grid(128)](buf1, primals_2,
            128, XBLOCK=128, num_warps=4, num_stages=1)
        del primals_2
        buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf2, (4, 4, 4), (16, 4, 1))
        buf3 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.int64)
        buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
        triton_poi_fused_convolution_max_1[grid(16)](buf2, primals_5, buf3,
            buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf5 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
        triton_poi_fused_cat_relu_2[grid(128)](buf2, primals_5, buf4, buf5,
            128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf2
        del primals_5
        buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf6, (4, 4, 4), (16, 4, 1))
        buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.int64)
        buf8 = buf4
        del buf4
        triton_poi_fused_convolution_max_1[grid(16)](buf6, primals_7, buf7,
            buf8, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf9 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
        triton_poi_fused_cat_relu_2[grid(128)](buf6, primals_7, buf8, buf9,
            128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf6
        del primals_7
        buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf10, (4, 4, 4), (16, 4, 1))
        buf11 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.int64)
        buf12 = buf8
        del buf8
        triton_poi_fused_convolution_max_1[grid(16)](buf10, primals_9,
            buf11, buf12, 16, XBLOCK=16, num_warps=1, num_stages=1)
        buf13 = empty_strided_cuda((4, 8, 4), (32, 4, 1), torch.float32)
        triton_poi_fused_cat_relu_2[grid(128)](buf10, primals_9, buf12,
            buf13, 128, XBLOCK=128, num_warps=4, num_stages=1)
        del buf10
        del primals_9
        buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1,),
            padding=(0,), dilation=(1,), transposed=False,
            output_padding=(0,), groups=1, bias=None)
        assert_size_stride(buf14, (4, 4, 4), (16, 4, 1))
        buf15 = reinterpret_tensor(buf12, (4, 4), (4, 1), 0)
        del buf12
        buf16 = empty_strided_cuda((4, 4), (4, 1), torch.int64)
        triton_poi_fused_convolution_max_3[grid(16)](buf14, primals_11,
            buf15, buf16, 16, XBLOCK=16, num_warps=1, num_stages=1)
        del buf14
        del primals_11
        buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        extern_kernels.addmm(primals_13, buf15, reinterpret_tensor(
            primals_12, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf17)
        del primals_13
    return (buf17, primals_1, primals_3, primals_4, primals_6, primals_8,
        primals_10, buf1, buf3, buf5, buf7, buf9, buf11, buf13, buf15,
        primals_12, reinterpret_tensor(buf16, (4, 4, 1), (4, 1, 1), 0))


class PointnetNew(nn.Module):

    def __init__(self, in_channels, out_channels, hidden_dim, segmentation=False):
        super().__init__()
        self.fc_in = nn.Conv1d(in_channels, 2 * hidden_dim, 1)
        self.fc_0 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_1 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_2 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.fc_3 = nn.Conv1d(2 * hidden_dim, hidden_dim, 1)
        self.segmentation = segmentation
        if segmentation:
            self.fc_out = nn.Conv1d(2 * hidden_dim, out_channels, 1)
        else:
            self.fc_out = nn.Linear(hidden_dim, out_channels)
        self.activation = nn.ReLU()

    def forward(self, input_0):
        primals_1 = self.fc_in.weight
        primals_2 = self.fc_in.bias
        primals_4 = self.fc_0.weight
        primals_5 = self.fc_0.bias
        primals_6 = self.fc_1.weight
        primals_7 = self.fc_1.bias
        primals_8 = self.fc_2.weight
        primals_9 = self.fc_2.bias
        primals_10 = self.fc_3.weight
        primals_11 = self.fc_3.bias
        primals_12 = self.fc_out.weight
        primals_13 = self.fc_out.bias
        primals_3 = input_0
        output = call([primals_1, primals_2, primals_3, primals_4,
            primals_5, primals_6, primals_7, primals_8, primals_9,
            primals_10, primals_11, primals_12, primals_13])
        return output[0]
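# --- Hedged smoke test (added for illustration; not part of the original
# repo). It assumes a CUDA device and the fixed (4, 4, 4) input shape that
# the compiled call() asserts; the constructor arguments mirror
# get_init_inputs() from the eager Pointnet above.
if __name__ == '__main__':
    if torch.cuda.is_available():
        compiled = PointnetNew(in_channels=4, out_channels=4, hidden_dim=4).cuda()
        points = torch.rand(4, 4, 4, device='cuda')
        print(compiled(points).shape)  # expected: torch.Size([4, 4])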
StructuralNeurobiologyLab/LightConvPoint
Pointnet
false
14,458
[ "Apache-2.0" ]
58
3f353f45e9e910fa390a74520dfd478e3e88f104
https://github.com/StructuralNeurobiologyLab/LightConvPoint/tree/3f353f45e9e910fa390a74520dfd478e3e88f104